Merge pull request #9620 from tiborvass/merge_release_v1.3.3

Merge release v1.3.3
Michael Crosby 2014-12-11 13:53:32 -08:00
commit 4d318db749
26 changed files with 1284 additions and 341 deletions

View file

@ -1,5 +1,19 @@
# Changelog
## 1.3.3 (2014-12-11)
#### Security
- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356)
- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357)
- Validate image IDs (CVE-2014-9358)
#### Runtime
- Fix an issue that caused image archives to be read slowly
#### Client
- Fix a regression related to stdin redirection
- Fix a regression with `docker cp` when destination is the current directory
## 1.3.2 (2014-11-20)
#### Security

View file

@ -1 +1 @@
1.3.2-dev
1.3.3-dev

View file

@ -4,6 +4,40 @@ page_keywords: docker, documentation, about, technology, understanding, release
#Release Notes
##Version 1.3.3
(2014-12-11)
This release fixes several security issues. To encourage immediate
upgrading, it also patches some critical bugs. All users are highly
encouraged to upgrade as soon as possible.
*Security fixes*
Patches and changes were made to address the following vulnerabilities:
* CVE-2014-9356: Path traversal during processing of absolute symlinks.
Absolute symlinks were not adequately checked for traversal, which created a
vulnerability exploitable via image extraction and/or volume mounts (see the
sketch after this list).
* CVE-2014-9357: Escalation of privileges during decompression of LZMA (.xz)
archives. Docker 1.3.2 added `chroot` for archive extraction. This created a
vulnerability that could allow malicious images or builds to write files to the
host system and escape containerization, leading to privilege escalation.
* CVE-2014-9358: Path traversal and spoofing opportunities via image
identifiers. Image IDs passed either via `docker load` or registry communications
were not sufficiently validated. This created a vulnerability to path traversal
attacks wherein malicious images or repository spoofing could lead to graph
corruption and manipulation.
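For illustration, here is a minimal sketch of the scoped symlink resolution
that the CVE-2014-9356 fix relies on. It uses the `pkg/symlink` package added
in this change set; the build-root path and the `escape` symlink are purely
illustrative.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	// Hypothetical build root containing a symlink "escape" -> "/etc".
	root := "/var/lib/docker/tmp/build"

	// Resolve a path that tries to reach the host's /etc through the
	// symlink, but keep the result scoped to the build root.
	resolved, err := symlink.FollowSymlinkInScope(root+"/escape/passwd", root)
	if err != nil {
		fmt.Println("error:", err)
		return
	}

	// With such a symlink in place, resolved would be
	// /var/lib/docker/tmp/build/etc/passwd, never /etc/passwd.
	fmt.Println(resolved)
}
```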
*Runtime fixes*
* Fixed an issue that caused image archives to be read slowly.
*Client fixes*
* Fixed a regression related to STDIN redirection.
* Fixed a regression involving `docker cp` when the current directory is the
destination.
##Version 1.3.2
(2014-11-24)

View file

@ -14,6 +14,7 @@ import (
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/utils"
)
// Loads a set of images into the repository. This is the complementary of ImageExport.
@ -114,6 +115,10 @@ func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string
log.Debugf("Error unmarshalling json", err)
return err
}
if err := utils.ValidateID(img.ID); err != nil {
log.Debugf("Error validating ID: %s", err)
return err
}
if img.Parent != "" {
if !s.graph.Exists(img.Parent) {
if err := s.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil {

View file

@ -16,7 +16,7 @@ import (
const (
testImageName = "myapp"
testImageID = "foo"
testImageID = "1a2d3c4d4e5fa2d2a21acea242a5e2345d3aefc3e7dfa2a2a2a21a2a2ad2d234"
)
func fakeTar() (io.Reader, error) {

View file

@ -1310,6 +1310,133 @@ COPY https://index.docker.io/robots.txt /`,
logDone("build - copy - disallow copy from remote")
}
func TestBuildAddBadLinks(t *testing.T) {
const (
dockerfile = `
FROM scratch
ADD links.tar /
ADD foo.txt /symlink/
`
targetFile = "foo.txt"
)
var (
name = "test-link-absolute"
)
defer deleteImages(name)
ctx, err := fakeContext(dockerfile, nil)
if err != nil {
t.Fatal(err)
}
defer ctx.Close()
tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-")
if err != nil {
t.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
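// The tar written below contains a single symlink whose target climbs, via
// many "..", to the absolute host path tempDir. The Dockerfile then ADDs
// foo.txt through that symlink; if traversal were possible, the file would
// show up at nonExistingFile on the host, which the final os.Stat rules out.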
symlinkTarget := fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir)
tarPath := filepath.Join(ctx.Dir, "links.tar")
nonExistingFile := filepath.Join(tempDir, targetFile)
fooPath := filepath.Join(ctx.Dir, targetFile)
tarOut, err := os.Create(tarPath)
if err != nil {
t.Fatal(err)
}
tarWriter := tar.NewWriter(tarOut)
header := &tar.Header{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: symlinkTarget,
Mode: 0755,
Uid: 0,
Gid: 0,
}
err = tarWriter.WriteHeader(header)
if err != nil {
t.Fatal(err)
}
tarWriter.Close()
tarOut.Close()
foo, err := os.Create(fooPath)
if err != nil {
t.Fatal(err)
}
defer foo.Close()
if _, err := foo.WriteString("test"); err != nil {
t.Fatal(err)
}
if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err)
}
if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
}
logDone("build - ADD must add files in container")
}
func TestBuildAddBadLinksVolume(t *testing.T) {
const (
dockerfileTemplate = `
FROM busybox
RUN ln -s /../../../../../../../../%s /x
VOLUME /x
ADD foo.txt /x/`
targetFile = "foo.txt"
)
var (
name = "test-link-absolute-volume"
dockerfile = ""
)
defer deleteImages(name)
tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-")
if err != nil {
t.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir)
nonExistingFile := filepath.Join(tempDir, targetFile)
ctx, err := fakeContext(dockerfile, nil)
if err != nil {
t.Fatal(err)
}
defer ctx.Close()
fooPath := filepath.Join(ctx.Dir, targetFile)
foo, err := os.Create(fooPath)
if err != nil {
t.Fatal(err)
}
defer foo.Close()
if _, err := foo.WriteString("test"); err != nil {
t.Fatal(err)
}
if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err)
}
if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
}
logDone("build - ADD should add files in volume")
}
// Issue #5270 - ensure we throw a better error than "unexpected EOF"
// when we can't access files in the context.
func TestBuildWithInaccessibleFilesInContext(t *testing.T) {
@ -3177,6 +3304,118 @@ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi`
logDone("build - ADD tar")
}
func TestBuildAddTarXz(t *testing.T) {
name := "testbuildaddtarxz"
defer deleteImages(name)
ctx := func() *FakeContext {
dockerfile := `
FROM busybox
ADD test.tar.xz /
RUN cat /test/foo | grep Hi`
tmpDir, err := ioutil.TempDir("", "fake-context")
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
t.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
t.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
t.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
t.Fatalf("failed to close tar archive: %v", err)
}
xzCompressCmd := exec.Command("xz", "test.tar")
xzCompressCmd.Dir = tmpDir
out, _, err := runCommandWithOutput(xzCompressCmd)
if err != nil {
t.Fatal(err, out)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
t.Fatalf("failed to open destination dockerfile: %v", err)
}
return &FakeContext{Dir: tmpDir}
}()
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err)
}
logDone("build - ADD tar.xz")
}
func TestBuildAddTarXzGz(t *testing.T) {
name := "testbuildaddtarxzgz"
defer deleteImages(name)
ctx := func() *FakeContext {
dockerfile := `
FROM busybox
ADD test.tar.xz.gz /
RUN ls /test.tar.xz.gz`
tmpDir, err := ioutil.TempDir("", "fake-context")
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
t.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
t.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
t.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
t.Fatalf("failed to close tar archive: %v", err)
}
xzCompressCmd := exec.Command("xz", "test.tar")
xzCompressCmd.Dir = tmpDir
out, _, err := runCommandWithOutput(xzCompressCmd)
if err != nil {
t.Fatal(err, out)
}
gzipCompressCmd := exec.Command("gzip", "test.tar.xz")
gzipCompressCmd.Dir = tmpDir
out, _, err = runCommandWithOutput(gzipCompressCmd)
if err != nil {
t.Fatal(err, out)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
t.Fatalf("failed to open destination dockerfile: %v", err)
}
return &FakeContext{Dir: tmpDir}
}()
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err)
}
logDone("build - ADD tar.xz.gz")
}
func TestBuildFromGIT(t *testing.T) {
name := "testbuildfromgit"
defer deleteImages(name)
@ -3592,3 +3831,86 @@ RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ]
logDone("build - change permission on single file")
}
func TestBuildSymlinkBreakout(t *testing.T) {
name := "testbuildsymlinkbreakout"
tmpdir, err := ioutil.TempDir("", name)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
ctx := filepath.Join(tmpdir, "context")
if err := os.MkdirAll(ctx, 0755); err != nil {
t.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(`
from busybox
add symlink.tar /
add inject /symlink/
`), 0644); err != nil {
t.Fatal(err)
}
inject := filepath.Join(ctx, "inject")
if err := ioutil.WriteFile(inject, nil, 0644); err != nil {
t.Fatal(err)
}
f, err := os.Create(filepath.Join(ctx, "symlink.tar"))
if err != nil {
t.Fatal(err)
}
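// symlink.tar (written below) holds two entries: "symlink2", whose target
// climbs above the filesystem root via "..", and "symlink", which points at
// tmpdir through symlink2. The `add inject /symlink/` step must therefore not
// create tmpdir/inject on the host, which the Lstat check at the end asserts.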
w := tar.NewWriter(f)
w.WriteHeader(&tar.Header{
Name: "symlink2",
Typeflag: tar.TypeSymlink,
Linkname: "/../../../../../../../../../../../../../../",
Uid: os.Getuid(),
Gid: os.Getgid(),
})
w.WriteHeader(&tar.Header{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: filepath.Join("symlink2", tmpdir),
Uid: os.Getuid(),
Gid: os.Getgid(),
})
w.Close()
f.Close()
if _, err := buildImageFromContext(name, &FakeContext{Dir: ctx}, false); err != nil {
t.Fatal(err)
}
if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil {
t.Fatal("symlink breakout - inject")
} else if !os.IsNotExist(err) {
t.Fatalf("unexpected error: %v", err)
}
logDone("build - symlink breakout")
}
func TestBuildXZHost(t *testing.T) {
name := "testbuildxzhost"
defer deleteImages(name)
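// The build context ships a fake `xz` script (which would create /injected
// if executed) alongside a small xz payload; the `RUN [ ! -e /injected ]`
// check passes only if the daemon decompresses test.xz with the host's xz
// rather than the one ADDed into the image.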
ctx, err := fakeContext(`
FROM busybox
ADD xz /usr/local/sbin/
RUN chmod 755 /usr/local/sbin/xz
ADD test.xz /
RUN [ ! -e /injected ]`,
map[string]string{
"test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" +
"\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" +
"\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21",
"xz": "#!/bin/sh\ntouch /injected",
})
if err != nil {
t.Fatal(err)
}
defer ctx.Close()
if _, err := buildImageFromContext(name, ctx, true); err != nil {
t.Fatal(err)
}
logDone("build - xz host is being used")
}

View file

@ -104,6 +104,140 @@ func TestSaveAndLoadRepoStdout(t *testing.T) {
logDone("save - do not save to a tty")
}
// save a repo using xz compression and try to load it using stdout
func TestSaveXzAndLoadRepoStdout(t *testing.T) {
tempDir, err := ioutil.TempDir("", "test-save-xz-gz-load-repo-stdout")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tempDir)
tarballPath := filepath.Join(tempDir, "foobar-save-load-test.tar.xz")
runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
t.Fatalf("failed to create a container: %v %v", out, err)
}
cleanedContainerID := stripTrailingCharacters(out)
repoName := "foobar-save-load-test-xz-gz"
inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
out, _, err = runCommandWithOutput(inspectCmd)
if err != nil {
t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err)
}
commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
out, _, err = runCommandWithOutput(commitCmd)
if err != nil {
t.Fatalf("failed to commit container: %v %v", out, err)
}
inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
before, _, err := runCommandWithOutput(inspectCmd)
if err != nil {
t.Fatalf("the repo should exist before saving it: %v %v", before, err)
}
saveCmdTemplate := `%v save %v | xz -c > %s`
saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName, tarballPath)
saveCmd := exec.Command("bash", "-c", saveCmdFinal)
out, _, err = runCommandWithOutput(saveCmd)
if err != nil {
t.Fatalf("failed to save repo: %v %v", out, err)
}
deleteImages(repoName)
loadCmdFinal := fmt.Sprintf(`cat %s | docker load`, tarballPath)
loadCmd := exec.Command("bash", "-c", loadCmdFinal)
out, _, err = runCommandWithOutput(loadCmd)
if err == nil {
t.Fatalf("expected error, but succeeded with no error and output: %v", out)
}
inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
after, _, err := runCommandWithOutput(inspectCmd)
if err == nil {
t.Fatalf("the repo should not exist: %v", after)
}
deleteContainer(cleanedContainerID)
deleteImages(repoName)
logDone("load - save a repo with xz compression & load it using stdout")
}
// save a repo using xz+gz compression and try to load it using stdout
func TestSaveXzGzAndLoadRepoStdout(t *testing.T) {
tempDir, err := ioutil.TempDir("", "test-save-xz-gz-load-repo-stdout")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tempDir)
tarballPath := filepath.Join(tempDir, "foobar-save-load-test.tar.xz.gz")
runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
out, _, err := runCommandWithOutput(runCmd)
if err != nil {
t.Fatalf("failed to create a container: %v %v", out, err)
}
cleanedContainerID := stripTrailingCharacters(out)
repoName := "foobar-save-load-test-xz-gz"
inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
out, _, err = runCommandWithOutput(inspectCmd)
if err != nil {
t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err)
}
commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
out, _, err = runCommandWithOutput(commitCmd)
if err != nil {
t.Fatalf("failed to commit container: %v %v", out, err)
}
inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
before, _, err := runCommandWithOutput(inspectCmd)
if err != nil {
t.Fatalf("the repo should exist before saving it: %v %v", before, err)
}
saveCmdTemplate := `%v save %v | xz -c | gzip -c > %s`
saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName, tarballPath)
saveCmd := exec.Command("bash", "-c", saveCmdFinal)
out, _, err = runCommandWithOutput(saveCmd)
if err != nil {
t.Fatalf("failed to save repo: %v %v", out, err)
}
deleteImages(repoName)
loadCmdFinal := fmt.Sprintf(`cat %s | docker load`, tarballPath)
loadCmd := exec.Command("bash", "-c", loadCmdFinal)
out, _, err = runCommandWithOutput(loadCmd)
if err == nil {
t.Fatalf("expected error, but succeeded with no error and output: %v", out)
}
inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
after, _, err := runCommandWithOutput(inspectCmd)
if err == nil {
t.Fatalf("the repo should not exist: %v", after)
}
deleteContainer(cleanedContainerID)
deleteImages(repoName)
logDone("load - save a repo with xz+gz compression & load it using stdout")
}
func TestSaveSingleTag(t *testing.T) {
repoName := "foobar-save-single-tag-test"

View file

@ -464,32 +464,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
return pipeReader, nil
}
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(archive io.Reader, dest string, options *TarOptions) error {
dest = filepath.Clean(dest)
if options == nil {
options = &TarOptions{}
}
if archive == nil {
return fmt.Errorf("Empty archive")
}
if options.Excludes == nil {
options.Excludes = []string{}
}
decompressedArchive, err := DecompressStream(archive)
if err != nil {
return err
}
defer decompressedArchive.Close()
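// Unpack extracts an already-decompressed tar stream into dest. Untar
// (below) and the chroot'd docker-untar helper are expected to run
// DecompressStream before calling it.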
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
tr := tar.NewReader(decompressedArchive)
trBuf := pools.BufioReader32KPool.Get(nil)
defer pools.BufioReader32KPool.Put(trBuf)
@ -572,10 +547,33 @@ loop:
return err
}
}
return nil
}
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(archive io.Reader, dest string, options *TarOptions) error {
if archive == nil {
return fmt.Errorf("Empty archive")
}
dest = filepath.Clean(dest)
if options == nil {
options = &TarOptions{}
}
if options.Excludes == nil {
options.Excludes = []string{}
}
decompressedArchive, err := DecompressStream(archive)
if err != nil {
return err
}
defer decompressedArchive.Close()
return Unpack(decompressedArchive, dest, options)
}
func (archiver *Archiver) TarUntar(src, dst string) error {
log.Debugf("TarUntar(%s %s)", src, dst)
archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})

View file

@ -15,24 +15,7 @@ import (
"github.com/docker/docker/pkg/system"
)
// ApplyLayer parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`.
func ApplyLayer(dest string, layer ArchiveReader) error {
dest = filepath.Clean(dest)
// We need to be able to set any perms
oldmask, err := system.Umask(0)
if err != nil {
return err
}
defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
layer, err = DecompressStream(layer)
if err != nil {
return err
}
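// UnpackLayer applies an already-decompressed layer diff to dest. ApplyLayer
// (further down) and the chroot'd docker-applyLayer helper decompress the
// stream before calling it.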
func UnpackLayer(dest string, layer ArchiveReader) error {
tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf)
@ -159,6 +142,24 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
return err
}
}
return nil
}
// ApplyLayer parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`.
func ApplyLayer(dest string, layer ArchiveReader) error {
dest = filepath.Clean(dest)
// We need to be able to set any perms
oldmask, err := system.Umask(0)
if err != nil {
return err
}
defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
layer, err = DecompressStream(layer)
if err != nil {
return err
}
return UnpackLayer(dest, layer)
}

View file

@ -7,6 +7,7 @@ import (
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"syscall"
@ -15,22 +16,26 @@ import (
"github.com/docker/docker/pkg/reexec"
)
var chrootArchiver = &archive.Archiver{Untar}
func chroot(path string) error {
if err := syscall.Chroot(path); err != nil {
return err
}
return syscall.Chdir("/")
}
func untar() {
runtime.LockOSThread()
flag.Parse()
if err := syscall.Chroot(flag.Arg(0)); err != nil {
if err := chroot(flag.Arg(0)); err != nil {
fatal(err)
}
if err := syscall.Chdir("/"); err != nil {
var options *archive.TarOptions
if err := json.NewDecoder(strings.NewReader(flag.Arg(1))).Decode(&options); err != nil {
fatal(err)
}
options := new(archive.TarOptions)
dec := json.NewDecoder(strings.NewReader(flag.Arg(1)))
if err := dec.Decode(options); err != nil {
fatal(err)
}
if err := archive.Untar(os.Stdin, "/", options); err != nil {
if err := archive.Unpack(os.Stdin, "/", options); err != nil {
fatal(err)
}
// fully consume stdin in case it is zero padded
@ -38,13 +43,21 @@ func untar() {
os.Exit(0)
}
var (
chrootArchiver = &archive.Archiver{Untar}
)
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
if tarArchive == nil {
return fmt.Errorf("Empty archive")
}
if options == nil {
options = &archive.TarOptions{}
}
if options.Excludes == nil {
options.Excludes = []string{}
}
func Untar(archive io.Reader, dest string, options *archive.TarOptions) error {
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
var (
buf bytes.Buffer
enc = json.NewEncoder(&buf)
)
if err := enc.Encode(options); err != nil {
return fmt.Errorf("Untar json encode: %v", err)
}
@ -53,9 +66,15 @@ func Untar(archive io.Reader, dest string, options *archive.TarOptions) error {
return err
}
}
dest = filepath.Clean(dest)
decompressedArchive, err := archive.DecompressStream(tarArchive)
if err != nil {
return err
}
defer decompressedArchive.Close()
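// Decompression happens here, on the host side, so the re-exec'd
// docker-untar process below only ever parses a plain tar stream and never
// has to locate a decompressor such as xz from inside the chroot.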
cmd := reexec.Command("docker-untar", dest, buf.String())
cmd.Stdin = archive
cmd.Stdin = decompressedArchive
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Untar %s %s", err, out)

View file

@ -3,8 +3,10 @@ package chrootarchive
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"syscall"
@ -16,19 +18,20 @@ func applyLayer() {
runtime.LockOSThread()
flag.Parse()
if err := syscall.Chroot(flag.Arg(0)); err != nil {
fatal(err)
}
if err := syscall.Chdir("/"); err != nil {
if err := chroot(flag.Arg(0)); err != nil {
fatal(err)
}
// We need to be able to set any perms
oldmask := syscall.Umask(0)
defer syscall.Umask(oldmask)
tmpDir, err := ioutil.TempDir("/", "temp-docker-extract")
if err != nil {
fatal(err)
}
os.Setenv("TMPDIR", tmpDir)
if err := archive.ApplyLayer("/", os.Stdin); err != nil {
os.RemoveAll(tmpDir)
err = archive.UnpackLayer("/", os.Stdin)
os.RemoveAll(tmpDir)
if err != nil {
fatal(err)
}
os.RemoveAll(tmpDir)
@ -37,8 +40,18 @@ func applyLayer() {
}
func ApplyLayer(dest string, layer archive.ArchiveReader) error {
dest = filepath.Clean(dest)
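// As with Untar, decompress on the host before handing the stream to the
// chroot'd docker-applyLayer helper, which then applies the plain tar via
// UnpackLayer.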
decompressed, err := archive.DecompressStream(layer)
if err != nil {
return err
}
defer func() {
if c, ok := decompressed.(io.Closer); ok {
c.Close()
}
}()
cmd := reexec.Command("docker-applyLayer", dest)
cmd.Stdin = layer
cmd.Stdin = decompressed
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("ApplyLayer %s %s", err, out)

191
pkg/symlink/LICENSE.APACHE Normal file
View file

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

27
pkg/symlink/LICENSE.BSD Normal file
View file

@ -0,0 +1,27 @@
Copyright (c) 2014 The Docker & Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,2 +1,3 @@
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
Victor Vieux <vieux@docker.com> (@vieux)
Tibor Vass <teabee89@gmail.com> (@tiborvass)
Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
Tianon Gravi <admwiggin@gmail.com> (@tianon)

5
pkg/symlink/README.md Normal file
View file

@ -0,0 +1,5 @@
Package symlink implements EvalSymlinksInScope, which is an extension of filepath.EvalSymlinks
from the [Go standard library](https://golang.org/pkg/path/filepath).
The code from filepath.EvalSymlinks has been adapted in fs.go.
Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go.
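A short usage sketch (the temporary directory layout below is hypothetical,
mirroring the `/foo/bar -> /outside` example from the package documentation):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	// Create <tmp>/foo/bar as a symlink to the absolute path /outside.
	tmp, err := ioutil.TempDir("", "symlink-readme")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmp)

	root := filepath.Join(tmp, "foo")
	if err := os.MkdirAll(root, 0755); err != nil {
		panic(err)
	}
	if err := os.Symlink("/outside", filepath.Join(root, "bar")); err != nil {
		panic(err)
	}

	// Resolving bar within the scope <tmp>/foo keeps the result inside the
	// scope: <tmp>/foo/outside, not /outside.
	resolved, err := symlink.FollowSymlinkInScope(filepath.Join(root, "bar"), root)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved)
}
```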

View file

@ -1,101 +1,131 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.BSD file.
// This code is a modified version of path/filepath/symlink.go from the Go standard library.
package symlink
import (
"fmt"
"bytes"
"errors"
"os"
"path"
"path/filepath"
"strings"
)
const maxLoopCounter = 100
// FollowSymlink will follow an existing link and scope it to the root
// path provided.
// The role of this function is to return an absolute path in the root
// or normalize to the root if the symlink leads to a path which is
// outside of the root.
// Errors encountered while attempting to follow the symlink in path
// will be reported.
// Normalizations to the root don't constitute errors.
func FollowSymlinkInScope(link, root string) (string, error) {
root, err := filepath.Abs(root)
// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an absolute path
func FollowSymlinkInScope(path, root string) (string, error) {
path, err := filepath.Abs(path)
if err != nil {
return "", err
}
link, err = filepath.Abs(link)
root, err = filepath.Abs(root)
if err != nil {
return "", err
}
if link == root {
return root, nil
}
if !strings.HasPrefix(filepath.Dir(link), root) {
return "", fmt.Errorf("%s is not within %s", link, root)
}
prev := "/"
for _, p := range strings.Split(link, "/") {
prev = filepath.Join(prev, p)
loopCounter := 0
for {
loopCounter++
if loopCounter >= maxLoopCounter {
return "", fmt.Errorf("loopCounter reached MAX: %v", loopCounter)
}
if !strings.HasPrefix(prev, root) {
// Don't resolve symlinks outside of root. For example,
// we don't have to check /home in the below.
//
// /home -> usr/home
// FollowSymlinkInScope("/home/bob/foo/bar", "/home/bob/foo")
break
}
stat, err := os.Lstat(prev)
if err != nil {
if os.IsNotExist(err) {
break
}
return "", err
}
// let's break if we're not dealing with a symlink
if stat.Mode()&os.ModeSymlink != os.ModeSymlink {
break
}
// process the symlink
dest, err := os.Readlink(prev)
if err != nil {
return "", err
}
if path.IsAbs(dest) {
prev = filepath.Join(root, dest)
} else {
prev, _ = filepath.Abs(prev)
dir := filepath.Dir(prev)
prev = filepath.Join(dir, dest)
if dir == root && !strings.HasPrefix(prev, root) {
prev = root
}
if len(prev) < len(root) || (len(prev) == len(root) && prev != root) {
prev = filepath.Join(root, filepath.Base(dest))
}
}
}
}
if prev == "/" {
prev = root
}
return prev, nil
return evalSymlinksInScope(path, root)
}
// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
// a result guaranteed to be contained within the scope `root`, at the time of the call.
// Symlinks in `root` are not evaluated and left as-is.
// Errors encountered while attempting to evaluate symlinks in path will be returned.
// Non-existing paths are valid and do not constitute an error.
// `path` has to contain `root` as a prefix, or else an error will be returned.
// Trying to break out from `root` does not constitute an error.
//
// Example:
// If /foo/bar -> /outside,
// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/oustide"
//
// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
// are created and not to subsequently create additional symlinks that could make a
// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
// would return "/foo/bar". If one later makes /foo/bar a symlink to /baz, then "/foo/bar" should
// no longer be considered safely contained in "/foo".
func evalSymlinksInScope(path, root string) (string, error) {
root = filepath.Clean(root)
if path == root {
return path, nil
}
if !strings.HasPrefix(path, root) {
return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
}
const maxIter = 255
originalPath := path
// given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
path = path[len(root):]
if root == string(filepath.Separator) {
path = string(filepath.Separator) + path
}
if !strings.HasPrefix(path, string(filepath.Separator)) {
return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
}
path = filepath.Clean(path)
// consume path by taking each frontmost path element,
// expanding it if it's a symlink, and appending it to b
var b bytes.Buffer
// b here will always be considered to be the "current absolute path inside
// root" when we append paths to it, we also append a slash and use
// filepath.Clean after the loop to trim the trailing slash
for n := 0; path != ""; n++ {
if n > maxIter {
return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
}
// find next path component, p
i := strings.IndexRune(path, filepath.Separator)
var p string
if i == -1 {
p, path = path, ""
} else {
p, path = path[:i], path[i+1:]
}
if p == "" {
continue
}
// this takes a b.String() like "b/../" and a p like "c" and turns it
// into "/b/../c" which then gets filepath.Cleaned into "/c" and then
// root gets prepended and we Clean again (to remove any trailing slash
// if the first Clean gave us just "/")
cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
if cleanP == string(filepath.Separator) {
// never Lstat "/" itself
b.Reset()
continue
}
fullP := filepath.Clean(root + cleanP)
fi, err := os.Lstat(fullP)
if os.IsNotExist(err) {
// if p does not exist, accept it
b.WriteString(p)
b.WriteRune(filepath.Separator)
continue
}
if err != nil {
return "", err
}
if fi.Mode()&os.ModeSymlink == 0 {
b.WriteString(p + string(filepath.Separator))
continue
}
// it's a symlink, put it at the front of path
dest, err := os.Readlink(fullP)
if err != nil {
return "", err
}
if filepath.IsAbs(dest) {
b.Reset()
}
path = dest + string(filepath.Separator) + path
}
// see note above on "fullP := ..." for why this is double-cleaned and
// what's happening here
return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil
}

View file

@ -1,248 +1,402 @@
// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE
package symlink
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
)
func abs(t *testing.T, p string) string {
o, err := filepath.Abs(p)
if err != nil {
t.Fatal(err)
}
return o
type dirOrLink struct {
path string
target string
}
func TestFollowSymLinkNormal(t *testing.T) {
link := "testdata/fs/a/d/c/data"
func makeFs(tmpdir string, fs []dirOrLink) error {
for _, s := range fs {
s.path = filepath.Join(tmpdir, s.path)
if s.target == "" {
os.MkdirAll(s.path, 0755)
continue
}
if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil {
return err
}
if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) {
return err
}
}
return nil
}
rewrite, err := FollowSymlinkInScope(link, "testdata")
func testSymlink(tmpdir, path, expected, scope string) error {
rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope))
if err != nil {
return err
}
expected, err = filepath.Abs(filepath.Join(tmpdir, expected))
if err != nil {
return err
}
if expected != rewrite {
return fmt.Errorf("Expected %q got %q", expected, rewrite)
}
return nil
}
func TestFollowSymlinkAbsolute(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute")
if err != nil {
t.Fatal(err)
}
if expected := abs(t, "testdata/b/c/data"); expected != rewrite {
t.Fatalf("Expected %s got %s", expected, rewrite)
defer os.RemoveAll(tmpdir)
if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil {
t.Fatal(err)
}
}
func TestFollowSymLinkRelativePath(t *testing.T) {
link := "testdata/fs/i"
rewrite, err := FollowSymlinkInScope(link, "testdata")
func TestFollowSymlinkRelativePath(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath")
if err != nil {
t.Fatal(err)
}
if expected := abs(t, "testdata/fs/a"); expected != rewrite {
t.Fatalf("Expected %s got %s", expected, rewrite)
defer os.RemoveAll(tmpdir)
if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil {
t.Fatal(err)
}
}
func TestFollowSymLinkUnderLinkedDir(t *testing.T) {
dir, err := ioutil.TempDir("", "docker-fs-test")
func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
os.Mkdir(filepath.Join(dir, "realdir"), 0700)
os.Symlink("realdir", filepath.Join(dir, "linkdir"))
linkDir := filepath.Join(dir, "linkdir", "foo")
dirUnderLinkDir := filepath.Join(dir, "linkdir", "foo", "bar")
os.MkdirAll(dirUnderLinkDir, 0700)
rewrite, err := FollowSymlinkInScope(dirUnderLinkDir, linkDir)
if err != nil {
defer os.RemoveAll(tmpdir)
if err := makeFs(tmpdir, []dirOrLink{
{path: "linkdir", target: "realdir"},
{path: "linkdir/foo/bar"},
}); err != nil {
t.Fatal(err)
}
if rewrite != dirUnderLinkDir {
t.Fatalf("Expected %s got %s", dirUnderLinkDir, rewrite)
if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil {
t.Fatal(err)
}
}
func TestFollowSymLinkRandomString(t *testing.T) {
func TestFollowSymlinkInvalidScopePathPair(t *testing.T) {
if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil {
t.Fatal("Random string should fail but didn't")
t.Fatal("expected an error")
}
}
func TestFollowSymLinkLastLink(t *testing.T) {
link := "testdata/fs/a/d"
rewrite, err := FollowSymlinkInScope(link, "testdata")
func TestFollowSymlinkLastLink(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink")
if err != nil {
t.Fatal(err)
}
if expected := abs(t, "testdata/b"); expected != rewrite {
t.Fatalf("Expected %s got %s", expected, rewrite)
defer os.RemoveAll(tmpdir)
if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil {
t.Fatal(err)
}
}
func TestFollowSymLinkRelativeLink(t *testing.T) {
link := "testdata/fs/a/e/c/data"
rewrite, err := FollowSymlinkInScope(link, "testdata")
func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope")
if err != nil {
t.Fatal(err)
}
if expected := abs(t, "testdata/fs/b/c/data"); expected != rewrite {
t.Fatalf("Expected %s got %s", expected, rewrite)
defer os.RemoveAll(tmpdir)
if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil {
t.Fatal(err)
}
// avoid allowing symlink e to lead us to ../b
// normalize to the "testdata/fs/a" scope
if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil {
t.Fatal(err)
}
}
func TestFollowSymLinkRelativeLinkScope(t *testing.T) {
func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil {
t.Fatal(err)
}
// avoid letting symlink f lead us out of the "testdata" scope
// we don't normalize because symlink f is in scope and there is no
// information leak
{
link := "testdata/fs/a/f"
rewrite, err := FollowSymlinkInScope(link, "testdata")
if err != nil {
t.Fatal(err)
}
if expected := abs(t, "testdata/test"); expected != rewrite {
t.Fatalf("Expected %s got %s", expected, rewrite)
}
if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil {
t.Fatal(err)
}
// avoid letting symlink f lead us out of the "testdata/fs" scope
// we don't normalize because symlink f is in scope and there is no
// information leak
{
link := "testdata/fs/a/f"
rewrite, err := FollowSymlinkInScope(link, "testdata/fs")
if err != nil {
t.Fatal(err)
}
if expected := abs(t, "testdata/fs/test"); expected != rewrite {
t.Fatalf("Expected %s got %s", expected, rewrite)
}
if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil {
t.Fatal(err)
}
}
func TestFollowSymlinkRelativeLinkChain(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
// avoid letting symlink g (pointed at by symlink h) take us out of scope
// TODO: we should probably normalize to scope here because ../[....]/root
// is out of scope and we leak information
{
link := "testdata/fs/b/h"
rewrite, err := FollowSymlinkInScope(link, "testdata")
if err != nil {
t.Fatal(err)
}
if expected := abs(t, "testdata/root"); expected != rewrite {
t.Fatalf("Expected %s got %s", expected, rewrite)
}
if err := makeFs(tmpdir, []dirOrLink{
{path: "testdata/fs/b/h", target: "../g"},
{path: "testdata/fs/g", target: "../../../../../../../../../../../../root"},
}); err != nil {
t.Fatal(err)
}
// avoid allowing symlink e to lead us to ../b
// normalize to the "testdata/fs/a" scope
{
link := "testdata/fs/a/e"
rewrite, err := FollowSymlinkInScope(link, "testdata/fs/a")
if err != nil {
t.Fatal(err)
}
if expected := abs(t, "testdata/fs/a"); expected != rewrite {
t.Fatalf("Expected %s got %s", expected, rewrite)
}
if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil {
t.Fatal(err)
}
}
func TestFollowSymlinkBreakoutPath(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
// avoid letting symlink -> ../directory/file escape from scope
// normalize to "testdata/fs/j"
{
link := "testdata/fs/j/k"
rewrite, err := FollowSymlinkInScope(link, "testdata/fs/j")
if err != nil {
t.Fatal(err)
}
if expected := abs(t, "testdata/fs/j"); expected != rewrite {
t.Fatalf("Expected %s got %s", expected, rewrite)
}
if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil {
t.Fatal(err)
}
}
func TestFollowSymlinkToRoot(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
// make sure we don't allow escaping to /
// normalize to dir
{
dir, err := ioutil.TempDir("", "docker-fs-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
linkFile := filepath.Join(dir, "foo")
os.Mkdir(filepath.Join(dir, ""), 0700)
os.Symlink("/", linkFile)
rewrite, err := FollowSymlinkInScope(linkFile, dir)
if err != nil {
t.Fatal(err)
}
if rewrite != dir {
t.Fatalf("Expected %s got %s", dir, rewrite)
}
if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
t.Fatal(err)
}
}
func TestFollowSymlinkSlashDotdot(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
tmpdir = filepath.Join(tmpdir, "dir", "subdir")
// make sure we don't allow escaping to /
// normalize to dir
{
dir, err := ioutil.TempDir("", "docker-fs-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
linkFile := filepath.Join(dir, "foo")
os.Mkdir(filepath.Join(dir, ""), 0700)
os.Symlink("/../../", linkFile)
rewrite, err := FollowSymlinkInScope(linkFile, dir)
if err != nil {
t.Fatal(err)
}
if rewrite != dir {
t.Fatalf("Expected %s got %s", dir, rewrite)
}
if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
t.Fatal(err)
}
}
func TestFollowSymlinkDotdot(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
tmpdir = filepath.Join(tmpdir, "dir", "subdir")
// make sure we stay in scope without leaking information
// this also checks for escaping to /
// normalize to dir
{
dir, err := ioutil.TempDir("", "docker-fs-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
linkFile := filepath.Join(dir, "foo")
os.Mkdir(filepath.Join(dir, ""), 0700)
os.Symlink("../../", linkFile)
rewrite, err := FollowSymlinkInScope(linkFile, dir)
if err != nil {
t.Fatal(err)
}
if rewrite != dir {
t.Fatalf("Expected %s got %s", dir, rewrite)
}
if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
t.Fatal(err)
}
}
func TestFollowSymlinkRelativePath2(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil {
t.Fatal(err)
}
}
func TestFollowSymlinkScopeLink(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
if err := makeFs(tmpdir, []dirOrLink{
{path: "root2"},
{path: "root", target: "root2"},
{path: "root2/foo", target: "../bar"},
}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil {
t.Fatal(err)
}
}
func TestFollowSymlinkRootScope(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
expected, err := filepath.EvalSymlinks(tmpdir)
if err != nil {
t.Fatal(err)
}
rewrite, err := FollowSymlinkInScope(tmpdir, "/")
if err != nil {
t.Fatal(err)
}
if rewrite != expected {
t.Fatalf("expected %q got %q", expected, rewrite)
}
}
func TestFollowSymlinkEmpty(t *testing.T) {
res, err := FollowSymlinkInScope("", "")
if err != nil {
t.Fatal(err)
}
wd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
if res != wd {
t.Fatal("expected %q got %q", wd, res)
}
}
func TestFollowSymlinkCircular(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil {
t.Fatal("expected an error for foo -> foo")
}
if err := makeFs(tmpdir, []dirOrLink{
{path: "root/bar", target: "baz"},
{path: "root/baz", target: "../bak"},
{path: "root/bak", target: "/bar"},
}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil {
t.Fatal("expected an error for bar -> baz -> bak -> bar")
}
}
func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
if err := makeFs(tmpdir, []dirOrLink{
{path: "root2"},
{path: "root", target: "root2"},
{path: "root/a", target: "r/s"},
{path: "root/r", target: "../root/t"},
{path: "root/root/t/s/b", target: "/../u"},
{path: "root/u/c", target: "."},
{path: "root/u/x/y", target: "../v"},
{path: "root/u/v", target: "/../w"},
}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil {
t.Fatal(err)
}
}
func TestFollowSymlinkBreakoutNonExistent(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
if err := makeFs(tmpdir, []dirOrLink{
{path: "root/slash", target: "/"},
{path: "root/sym", target: "/idontexist/../slash"},
}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil {
t.Fatal(err)
}
}
func TestFollowSymlinkNoLexicalCleaning(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
if err := makeFs(tmpdir, []dirOrLink{
{path: "root/sym", target: "/foo/bar"},
{path: "root/hello", target: "/sym/../baz"},
}); err != nil {
t.Fatal(err)
}
if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil {
t.Fatal(err)
}
}

View file

@ -1 +0,0 @@
/b

View file

@ -1 +0,0 @@
../b

View file

@ -1 +0,0 @@
../../../../test

View file

@ -1 +0,0 @@
../g

View file

@ -1 +0,0 @@
../../../../../../../../../../../../root

View file

@ -1 +0,0 @@
a

View file

@ -1 +0,0 @@
../i/a

View file

@ -23,7 +23,6 @@ var (
ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")")
ErrDoesNotExist = errors.New("Image does not exist")
errLoginRequired = errors.New("Authentication is required.")
validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
validNamespace = regexp.MustCompile(`^([a-z0-9_]{4,30})$`)
validRepo = regexp.MustCompile(`^([a-z0-9-_.]+)$`)
)
@ -171,7 +170,8 @@ func validateRepositoryName(repositoryName string) error {
namespace = "library"
name = nameParts[0]
if validHex.MatchString(name) {
// the repository name must not be a valid image ID
if err := utils.ValidateID(name); err == nil {
return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name)
}
} else {

View file

@ -31,6 +31,10 @@ type KeyValuePair struct {
Value string
}
var (
validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
)
// Request a given URL and return an io.Reader
func Download(url string) (resp *http.Response, err error) {
if resp, err = http.Get(url); err != nil {
@ -190,11 +194,9 @@ func GenerateRandomID() string {
}
func ValidateID(id string) error {
if id == "" {
return fmt.Errorf("Id can't be empty")
}
if strings.Contains(id, ":") {
return fmt.Errorf("Invalid character in id: ':'")
if ok := validHex.MatchString(id); !ok {
err := fmt.Errorf("image ID '%s' is invalid", id)
return err
}
return nil
}
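For illustration, a small sketch of how the stricter check behaves; the valid
ID below is the one used by the updated registry test, and the invalid ones
are made up:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/utils"
)

func main() {
	// A well-formed ID: exactly 64 lowercase hexadecimal characters.
	good := "1a2d3c4d4e5fa2d2a21acea242a5e2345d3aefc3e7dfa2a2a2a21a2a2ad2d234"
	fmt.Println(utils.ValidateID(good)) // <nil>

	// Anything else is rejected, including the traversal-style identifiers
	// that CVE-2014-9358 is concerned with.
	for _, bad := range []string{"", "foo", "../../../../tmp/escape"} {
		fmt.Println(utils.ValidateID(bad)) // image ID '...' is invalid
	}
}
```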