
Support downloading remote tarball contexts in builder jobs.

Signed-off-by: Moysés Borges <moysesb@gmail.com>
Moysés Borges committed 10 years ago (commit d48bface59)

+ 87 - 12
builder/job.go

@@ -2,6 +2,7 @@ package builder
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -17,6 +18,7 @@ import (
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/httputils"
 	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/progressreader"
 	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/pkg/urlutil"
 	"github.com/docker/docker/registry"
@@ -24,6 +26,10 @@ import (
 	"github.com/docker/docker/utils"
 )
 
+// When downloading remote contexts, limit the amount (in bytes)
+// to be read from the response body in order to detect its Content-Type
+const maxPreambleLength = 100
+
 // whitelist of commands allowed for a commit/import
 var validCommitCommands = map[string]bool{
 	"entrypoint": true,
@@ -91,6 +97,7 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
 		tag      string
 		context  io.ReadCloser
 	)
+	sf := streamformatter.NewJSONStreamFormatter()
 
 	repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName)
 	if repoName != "" {
@@ -121,27 +128,50 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
 	} else if urlutil.IsURL(buildConfig.RemoteURL) {
 		f, err := httputils.Download(buildConfig.RemoteURL)
 		if err != nil {
-			return err
+			return fmt.Errorf("Error downloading remote context %s: %v", buildConfig.RemoteURL, err)
 		}
 		defer f.Body.Close()
-		dockerFile, err := ioutil.ReadAll(f.Body)
+		ct := f.Header.Get("Content-Type")
+		clen := int(f.ContentLength)
+		contentType, bodyReader, err := inspectResponse(ct, f.Body, clen)
+
+		defer bodyReader.Close()
+
 		if err != nil {
-			return err
+			return fmt.Errorf("Error detecting content type for remote %s: %v", buildConfig.RemoteURL, err)
 		}
+		if contentType == httputils.MimeTypes.TextPlain {
+			dockerFile, err := ioutil.ReadAll(bodyReader)
+			if err != nil {
+				return err
+			}
 
-		// When we're downloading just a Dockerfile put it in
-		// the default name - don't allow the client to move/specify it
-		buildConfig.DockerfileName = api.DefaultDockerfileName
+			// When we're downloading just a Dockerfile put it in
+			// the default name - don't allow the client to move/specify it
+			buildConfig.DockerfileName = api.DefaultDockerfileName
 
-		c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
-		if err != nil {
-			return err
+			c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
+			if err != nil {
+				return err
+			}
+			context = c
+		} else {
+			// Pass through - this is a pre-packaged context, presumably
+			// with a Dockerfile with the right name inside it.
+			prCfg := progressreader.Config{
+				In:        bodyReader,
+				Out:       buildConfig.Stdout,
+				Formatter: sf,
+				Size:      clen,
+				NewLines:  true,
+				ID:        "Downloading context",
+				Action:    buildConfig.RemoteURL,
+			}
+			context = progressreader.New(prCfg)
 		}
-		context = c
 	}
-	defer context.Close()
 
-	sf := streamformatter.NewJSONStreamFormatter()
+	defer context.Close()
 
 	builder := &Builder{
 		Daemon: d,
@@ -241,3 +271,48 @@ func Commit(d *daemon.Daemon, name string, c *daemon.ContainerCommitConfig) (str
 
 	return img.ID, nil
 }
+
+// inspectResponse looks into the http response data at r to determine whether its
+// content-type is on the list of acceptable content types for remote build contexts.
+// This function returns:
+//    - a string representation of the detected content-type
+//    - an io.Reader for the response body
+//    - an error value which will be non-nil either when something goes wrong while
+//      reading bytes from r or when the detected content-type is not acceptable.
+func inspectResponse(ct string, r io.ReadCloser, clen int) (string, io.ReadCloser, error) {
+	plen := clen
+	if plen <= 0 || plen > maxPreambleLength {
+		plen = maxPreambleLength
+	}
+
+	preamble := make([]byte, plen, plen)
+	rlen, err := r.Read(preamble)
+	if rlen == 0 {
+		return ct, r, errors.New("Empty response")
+	}
+	if err != nil && err != io.EOF {
+		return ct, r, err
+	}
+
+	preambleR := bytes.NewReader(preamble)
+	bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r))
+	// Some web servers will use application/octet-stream as the default
+	// content type for files without an extension (e.g. 'Dockerfile')
+	// so if we receive this value we better check for text content
+	contentType := ct
+	if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream {
+		contentType, _, err = httputils.DetectContentType(preamble)
+		if err != nil {
+			return contentType, bodyReader, err
+		}
+	}
+
+	contentType = selectAcceptableMIME(contentType)
+	var cterr error
+	if len(contentType) == 0 {
+		cterr = fmt.Errorf("unsupported Content-Type %q", ct)
+		contentType = ct
+	}
+
+	return contentType, bodyReader, cterr
+}
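
The heart of `inspectResponse` is the preamble trick: read at most `maxPreambleLength` bytes for content sniffing, then stitch them back in front of the remaining body with `io.MultiReader` so the caller still consumes the complete download. A minimal standalone sketch of that pattern follows; the `sniffBody` helper and its names are illustrative, not part of this commit, and error handling is deliberately thin.

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "io/ioutil"
        "net/http"
        "strings"
    )

    // sniffBody reads at most n bytes from r to guess its content type, then
    // returns a reader that still yields the complete original stream.
    func sniffBody(r io.ReadCloser, n int) (string, io.ReadCloser) {
        preamble := make([]byte, n)
        read, _ := r.Read(preamble) // error handling elided for brevity
        ct := http.DetectContentType(preamble[:read])
        full := ioutil.NopCloser(io.MultiReader(bytes.NewReader(preamble[:read]), r))
        return ct, full
    }

    func main() {
        body := ioutil.NopCloser(strings.NewReader("FROM busybox\nRUN echo hi\n"))
        ct, full := sniffBody(body, 100)
        rest, _ := ioutil.ReadAll(full)
        fmt.Println(ct)           // text/plain; charset=utf-8
        fmt.Println(string(rest)) // the untouched Dockerfile contents
    }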

+ 113 - 0
builder/job_test.go

@@ -0,0 +1,113 @@
+package builder
+
+import (
+	"bytes"
+	"io/ioutil"
+	"testing"
+)
+
+var textPlainDockerfile = "FROM busybox"
+var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic
+
+func TestInspectEmptyResponse(t *testing.T) {
+	ct := "application/octet-stream"
+	br := ioutil.NopCloser(bytes.NewReader([]byte("")))
+	contentType, bReader, err := inspectResponse(ct, br, 0)
+	if err == nil {
+		t.Fatalf("Should have generated an error for an empty response")
+	}
+	if contentType != "application/octet-stream" {
+		t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType)
+	}
+	body, err := ioutil.ReadAll(bReader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(body) != 0 {
+		t.Fatal("response body should remain empty")
+	}
+}
+
+func TestInspectResponseBinary(t *testing.T) {
+	ct := "application/octet-stream"
+	br := ioutil.NopCloser(bytes.NewReader(binaryContext))
+	contentType, bReader, err := inspectResponse(ct, br, len(binaryContext))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if contentType != "application/octet-stream" {
+		t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType)
+	}
+	body, err := ioutil.ReadAll(bReader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(body) != len(binaryContext) {
+		t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body))
+	}
+	for i := range body {
+		if body[i] != binaryContext[i] {
+			t.Fatalf("Corrupted response body at byte index %d", i)
+		}
+	}
+}
+
+func TestResponseUnsupportedContentType(t *testing.T) {
+	content := []byte(textPlainDockerfile)
+	ct := "application/json"
+	br := ioutil.NopCloser(bytes.NewReader(content))
+	contentType, bReader, err := inspectResponse(ct, br, len(textPlainDockerfile))
+
+	if err == nil {
+		t.Fatal("Should have returned an error on content-type 'application/json'")
+	}
+	if contentType != ct {
+		t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", ct, contentType)
+	}
+	body, err := ioutil.ReadAll(bReader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(body) != textPlainDockerfile {
+		t.Fatalf("Corrupted response body %s", body)
+	}
+}
+
+func TestInspectResponseTextSimple(t *testing.T) {
+	content := []byte(textPlainDockerfile)
+	ct := "text/plain"
+	br := ioutil.NopCloser(bytes.NewReader(content))
+	contentType, bReader, err := inspectResponse(ct, br, len(content))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if contentType != "text/plain" {
+		t.Fatalf("Content type should be 'text/plain' but is %q", contentType)
+	}
+	body, err := ioutil.ReadAll(bReader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(body) != textPlainDockerfile {
+		t.Fatalf("Corrupted response body %s", body)
+	}
+}
+
+func TestInspectResponseEmptyContentType(t *testing.T) {
+	content := []byte(textPlainDockerfile)
+	br := ioutil.NopCloser(bytes.NewReader(content))
+	contentType, bodyReader, err := inspectResponse("", br, len(content))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if contentType != "text/plain" {
+		t.Fatalf("Content type should be 'text/plain' but is %q", contentType)
+	}
+	body, err := ioutil.ReadAll(bodyReader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if string(body) != textPlainDockerfile {
+		t.Fatalf("Corrupted response body %s", body)
+	}
+}

+ 9 - 0
builder/support.go

@@ -1,9 +1,18 @@
 package builder
 
 import (
+	"regexp"
 	"strings"
 )
 
+const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))`
+
+var mimeRe = regexp.MustCompile(acceptableRemoteMIME)
+
+func selectAcceptableMIME(ct string) string {
+	return mimeRe.FindString(ct)
+}
+
 func handleJsonArgs(args []string, attributes map[string]bool) []string {
 	if len(args) == 0 {
 		return []string{}
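
The regexp in `acceptableRemoteMIME` is the whole whitelist: `selectAcceptableMIME` simply relies on `FindString` returning the empty string when nothing matches. A quick standalone illustration of that behaviour, with the pattern copied verbatim from `builder/support.go` above:

    package main

    import (
        "fmt"
        "regexp"
    )

    // Copy of acceptableRemoteMIME, reproduced here only to show how
    // FindString acts as a whitelist.
    var mimeRe = regexp.MustCompile(`(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))`)

    func main() {
        fmt.Printf("%q\n", mimeRe.FindString("application/x-gzip")) // "application/x-gzip"
        fmt.Printf("%q\n", mimeRe.FindString("text/plain"))         // "text/plain"
        fmt.Printf("%q\n", mimeRe.FindString("application/json"))   // "" (rejected)
    }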

+ 41 - 0
builder/support_test.go

@@ -0,0 +1,41 @@
+package builder
+
+import (
+	"fmt"
+	"testing"
+)
+
+func TestSelectAcceptableMIME(t *testing.T) {
+	validMimeStrings := []string{
+		"application/x-bzip2",
+		"application/bzip2",
+		"application/gzip",
+		"application/x-gzip",
+		"application/x-xz",
+		"application/xz",
+		"application/tar",
+		"application/x-tar",
+		"application/octet-stream",
+		"text/plain",
+	}
+
+	invalidMimeStrings := []string{
+		"",
+		"application/octet",
+		"application/json",
+	}
+
+	for _, m := range invalidMimeStrings {
+		if len(selectAcceptableMIME(m)) > 0 {
+			err := fmt.Errorf("Should not have accepted %q", m)
+			t.Fatal(err)
+		}
+	}
+
+	for _, m := range validMimeStrings {
+		if str := selectAcceptableMIME(m); str == "" {
+			err := fmt.Errorf("Should have accepted %q", m)
+			t.Fatal(err)
+		}
+	}
+}

+ 10 - 6
docs/reference/api/docker_remote_api_v1.19.md

@@ -1181,13 +1181,17 @@ or being killed.
 
 Query Parameters:
 
--   **dockerfile** - Path within the build context to the Dockerfile. This is 
-        ignored if `remote` is specified and points to an individual filename.
--   **t** – A repository name (and optionally a tag) to apply to
+-   **dockerfile** - Path within the build context to the `Dockerfile`. This is
+        ignored if `remote` is specified and points to an external `Dockerfile`.
+-   **t** – Repository name (and optionally a tag) to be applied to
         the resulting image in case of success.
--   **remote** – A Git repository URI or HTTP/HTTPS URI build source. If the 
-        URI specifies a filename, the file's contents are placed into a file 
-		called `Dockerfile`.
+-   **remote** – A Git repository URI or HTTP/HTTPS context URI. If the
+        URI points to a single text file, the file's contents are placed into
+        a file called `Dockerfile` and the image is built from that file. If
+        the URI points to a tarball, the file is downloaded by the daemon and
+        the contents therein used as the context for the build. If the URI
+        points to a tarball and the `dockerfile` parameter is also specified,
+        there must be a file with the corresponding path inside the tarball.
 -   **q** – Suppress verbose build output.
 -   **nocache** – Do not use the cache when building the image.
 -   **pull** - Attempt to pull the image even if an older image exists locally.

+ 55 - 15
docs/reference/commandline/cli.md

@@ -706,13 +706,17 @@ to any of the files in the context. For example, your build can use an
 [*ADD*](/reference/builder/#add) instruction to reference a file in the
 context.
 
-The `URL` parameter can specify the location of a Git repository; the repository
-acts as the build context. The system recursively clones the repository and its
-submodules using a `git clone --depth 1 --recursive` command. This command runs
-in a temporary directory on your local host. After the command succeeds, the
-directory is sent to the Docker daemon as the context. Local clones give you the
-ability to access private repositories using local user credentials, VPNs, and
-so forth.
+The `URL` parameter can refer to three kinds of resources: Git repositories,
+pre-packaged tarball contexts, and plain text files.
+
+#### Git repositories
+When the `URL` parameter points to the location of a Git repository, the
+repository acts as the build context. The system recursively clones the
+repository and its submodules using a `git clone --depth 1 --recursive`
+command. This command runs in a temporary directory on your local host. After
+the command succeeds, the directory is sent to the Docker daemon as the
+context. Local clones give you the ability to access private repositories using
+local user credentials, VPNs, and so forth.
 
 Git URLs accept context configuration in their fragment section, separated by a
 colon `:`.  The first part represents the reference that Git will check out,
@@ -739,21 +743,34 @@ Build Syntax Suffix | Commit Used | Build Context Used
 `myrepo.git#mybranch:myfolder` | `refs/heads/mybranch` | `/myfolder`
 `myrepo.git#abcdef:myfolder` | `sha1 = abcdef` | `/myfolder`
 
-Instead of specifying a context, you can pass a single Dockerfile in the `URL`
-or pipe the file in via `STDIN`. To pipe a Dockerfile from `STDIN`:
+#### Tarball contexts
+If you pass a URL to a remote tarball, the URL itself is sent to the daemon:
 
-    docker build - < Dockerfile
+    $ docker build http://server/context.tar.gz
 
-If you use STDIN or specify a `URL`, the system places the contents into a file
-called `Dockerfile`, and any `-f`, `--file` option is ignored. In this
-scenario, there is no context.
+The download operation will be performed on the host the Docker daemon is
+running on, which is not necessarily the same host from which the build command
+is being issued. The Docker daemon will fetch `context.tar.gz` and use it as the
+build context. Tarball contexts must be tar archives conforming to the standard
+`tar` UNIX format and can be compressed with any one of the 'xz', 'bzip2',
+'gzip' or 'identity' (no compression) formats.
+
+#### Text files
+Instead of specifying a context, you can pass a single `Dockerfile` in the
+`URL` or pipe the file in via `STDIN`. To pipe a `Dockerfile` from `STDIN`:
+
+    $ docker build - < Dockerfile
+
+If you use `STDIN` or specify a `URL` pointing to a plain text file, the system
+places the contents into a file called `Dockerfile`, and any `-f`, `--file`
+option is ignored. In this scenario, there is no context.
 
 By default the `docker build` command will look for a `Dockerfile` at the root
 of the build context. The `-f`, `--file`, option lets you specify the path to
 an alternative file to use instead. This is useful in cases where the same set
 of files are used for multiple builds. The path must be to a file within the
-build context. If a relative path is specified then it must to be relative to
-the current directory.
+build context. If a relative path is specified then it is interpreted as
+relative to the root of the context.
 
 In most cases, it's best to put each Dockerfile in an empty directory. Then,
 add to that directory only the files needed for building the Dockerfile. To
@@ -883,6 +900,29 @@ The Dockerfile at the root of the repository is used as Dockerfile. Note that
 you can specify an arbitrary Git repository by using the `git://` or `git@`
 schema.
 
+
+    $ docker build -f ctx/Dockerfile http://server/ctx.tar.gz
+    Downloading context: http://server/ctx.tar.gz [===================>]    240 B/240 B
+    Step 0 : FROM busybox
+     ---> 8c2e06607696
+    Step 1 : ADD ctx/container.cfg /
+     ---> e7829950cee3
+    Removing intermediate container b35224abf821
+    Step 2 : CMD /bin/ls
+     ---> Running in fbc63d321d73
+     ---> 3286931702ad
+    Removing intermediate container fbc63d321d73
+    Successfully built 377c409b35e4
+
+
+This will send the URL `http://server/ctx.tar.gz` to the Docker daemon, which
+will download and extract the referenced tarball. The `-f ctx/Dockerfile`
+parameter specifies a path inside `ctx.tar.gz` to the `Dockerfile` that will
+be used to build the image. Any `ADD` commands in that `Dockerfile` that
+refer to local paths must be relative to the root of the contents inside
+`ctx.tar.gz`. In the example above, the tarball contains a directory `ctx/`,
+so the `ADD ctx/container.cfg /` operation works as expected.
+
     $ docker build -f Dockerfile.debug .
 
 This will use a file called `Dockerfile.debug` for the build instructions

+ 85 - 0
integration-cli/docker_api_containers_test.go

@@ -534,6 +534,91 @@ RUN find /tmp/`,
 	}
 }
 
+func (s *DockerSuite) TestBuildApiRemoteTarballContext(c *check.C) {
+	buffer := new(bytes.Buffer)
+	tw := tar.NewWriter(buffer)
+	defer tw.Close()
+
+	dockerfile := []byte("FROM busybox")
+	if err := tw.WriteHeader(&tar.Header{
+		Name: "Dockerfile",
+		Size: int64(len(dockerfile)),
+	}); err != nil {
+		c.Fatalf("failed to write tar file header: %v", err)
+	}
+	if _, err := tw.Write(dockerfile); err != nil {
+		c.Fatalf("failed to write tar file content: %v", err)
+	}
+	if err := tw.Close(); err != nil {
+		c.Fatalf("failed to close tar archive: %v", err)
+	}
+
+	server, err := fakeBinaryStorage(map[string]*bytes.Buffer{
+		"testT.tar": buffer,
+	})
+	c.Assert(err, check.IsNil)
+
+	defer server.Close()
+
+	res, _, err := sockRequestRaw("POST", "/build?remote="+server.URL()+"/testT.tar", nil, "application/tar")
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusOK)
+}
+
+func (s *DockerSuite) TestBuildApiRemoteTarballContextWithCustomDockerfile(c *check.C) {
+	buffer := new(bytes.Buffer)
+	tw := tar.NewWriter(buffer)
+	defer tw.Close()
+
+	dockerfile := []byte(`FROM busybox
+RUN echo 'wrong'`)
+	if err := tw.WriteHeader(&tar.Header{
+		Name: "Dockerfile",
+		Size: int64(len(dockerfile)),
+	}); err != nil {
+		c.Fatalf("failed to write tar file header: %v", err)
+	}
+	if _, err := tw.Write(dockerfile); err != nil {
+		c.Fatalf("failed to write tar file content: %v", err)
+	}
+
+	custom := []byte(`FROM busybox
+RUN echo 'right'
+`)
+	if err := tw.WriteHeader(&tar.Header{
+		Name: "custom",
+		Size: int64(len(custom)),
+	}); err != nil {
+		c.Fatalf("failed to write tar file header: %v", err)
+	}
+	if _, err := tw.Write(custom); err != nil {
+		c.Fatalf("failed to write tar file content: %v", err)
+	}
+
+	if err := tw.Close(); err != nil {
+		c.Fatalf("failed to close tar archive: %v", err)
+	}
+
+	server, err := fakeBinaryStorage(map[string]*bytes.Buffer{
+		"testT.tar": buffer,
+	})
+	c.Assert(err, check.IsNil)
+
+	defer server.Close()
+	url := "/build?dockerfile=custom&remote=" + server.URL() + "/testT.tar"
+	res, body, err := sockRequestRaw("POST", url, nil, "application/tar")
+	c.Assert(err, check.IsNil)
+	c.Assert(res.StatusCode, check.Equals, http.StatusOK)
+
+	defer body.Close()
+	content, err := readBody(body)
+	c.Assert(err, check.IsNil)
+
+	if strings.Contains(string(content), "wrong") {
+		c.Fatalf("Build used the wrong dockerfile.")
+	}
+}
+
 func (s *DockerSuite) TestBuildApiLowerDockerfile(c *check.C) {
 	git, err := fakeGIT("repo", map[string]string{
 		"dockerfile": `FROM busybox

+ 40 - 0
integration-cli/docker_cli_build_test.go

@@ -4167,6 +4167,46 @@ func (s *DockerSuite) TestBuildFromGITWithContext(c *check.C) {
 	}
 }
 
+func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) {
+	name := "testbuildfromremotetarball"
+
+	buffer := new(bytes.Buffer)
+	tw := tar.NewWriter(buffer)
+	defer tw.Close()
+
+	dockerfile := []byte(`FROM busybox
+					MAINTAINER docker`)
+	if err := tw.WriteHeader(&tar.Header{
+		Name: "Dockerfile",
+		Size: int64(len(dockerfile)),
+	}); err != nil {
+		c.Fatalf("failed to write tar file header: %v", err)
+	}
+	if _, err := tw.Write(dockerfile); err != nil {
+		c.Fatalf("failed to write tar file content: %v", err)
+	}
+	if err := tw.Close(); err != nil {
+		c.Fatalf("failed to close tar archive: %v", err)
+	}
+
+	server, err := fakeBinaryStorage(map[string]*bytes.Buffer{
+		"testT.tar": buffer,
+	})
+	c.Assert(err, check.IsNil)
+
+	defer server.Close()
+
+	_, err = buildImageFromPath(name, server.URL()+"/testT.tar", true)
+	c.Assert(err, check.IsNil)
+
+	res, err := inspectField(name, "Author")
+	c.Assert(err, check.IsNil)
+
+	if res != "docker" {
+		c.Fatalf("Maintainer should be docker, got %s", res)
+	}
+}
+
 func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) {
 	name := "testbuildcmdcleanuponentrypoint"
 	if _, err := buildImage(name,

+ 31 - 7
integration-cli/docker_utils.go

@@ -632,6 +632,10 @@ type FakeContext struct {
 }
 
 func (f *FakeContext) Add(file, content string) error {
+	return f.addFile(file, []byte(content))
+}
+
+func (f *FakeContext) addFile(file string, content []byte) error {
 	filepath := path.Join(f.Dir, file)
 	dirpath := path.Dir(filepath)
 	if dirpath != "." {
@@ -639,7 +643,8 @@ func (f *FakeContext) Add(file, content string) error {
 			return err
 		}
 	}
-	return ioutil.WriteFile(filepath, []byte(content), 0644)
+	return ioutil.WriteFile(filepath, content, 0644)
+
 }
 
 func (f *FakeContext) Delete(file string) error {
@@ -651,11 +656,7 @@ func (f *FakeContext) Close() error {
 	return os.RemoveAll(f.Dir)
 }
 
-func fakeContextFromDir(dir string) *FakeContext {
-	return &FakeContext{dir}
-}
-
-func fakeContextWithFiles(files map[string]string) (*FakeContext, error) {
+func fakeContextFromNewTempDir() (*FakeContext, error) {
 	tmp, err := ioutil.TempDir("", "fake-context")
 	if err != nil {
 		return nil, err
@@ -663,8 +664,18 @@ func fakeContextWithFiles(files map[string]string) (*FakeContext, error) {
 	if err := os.Chmod(tmp, 0755); err != nil {
 		return nil, err
 	}
+	return fakeContextFromDir(tmp), nil
+}
+
+func fakeContextFromDir(dir string) *FakeContext {
+	return &FakeContext{dir}
+}
 
-	ctx := fakeContextFromDir(tmp)
+func fakeContextWithFiles(files map[string]string) (*FakeContext, error) {
+	ctx, err := fakeContextFromNewTempDir()
+	if err != nil {
+		return nil, err
+	}
 	for file, content := range files {
 		if err := ctx.Add(file, content); err != nil {
 			ctx.Close()
@@ -701,6 +712,19 @@ type FakeStorage interface {
 	CtxDir() string
 }
 
+func fakeBinaryStorage(archives map[string]*bytes.Buffer) (FakeStorage, error) {
+	ctx, err := fakeContextFromNewTempDir()
+	if err != nil {
+		return nil, err
+	}
+	for name, content := range archives {
+		if err := ctx.addFile(name, content.Bytes()); err != nil {
+			return nil, err
+		}
+	}
+	return fakeStorageWithContext(ctx)
+}
+
 // fakeStorage returns either a local or remote (at daemon machine) file server
 func fakeStorage(files map[string]string) (FakeStorage, error) {
 	ctx, err := fakeContextWithFiles(files)

+ 20 - 4
man/docker-build.1.md

@@ -37,13 +37,18 @@ daemon, not by the CLI, so the whole context must be transferred to the daemon.
 The Docker CLI reports "Sending build context to Docker daemon" when the context is sent to 
 the daemon.
 
-When a single Dockerfile is given as the URL, then no context is set.
-When a Git repository is set as the **URL**, the repository is used
-as context.
+When the URL to a tarball archive or to a single Dockerfile is given, no context is sent from
+the client to the Docker daemon. When a Git repository is set as the **URL**, the repository is
+cloned locally and then sent as the context.
 
 # OPTIONS
 **-f**, **--file**=*PATH/Dockerfile*
-   Path to the Dockerfile to use. If the path is a relative path then it must be relative to the current directory. The file must be within the build context. The default is *Dockerfile*.
+   Path to the Dockerfile to use. If the path is a relative path and you are
+   building from a local directory, then the path must be relative to that
+   directory. If you are building from a remote URL pointing to either a
+   tarball or a Git repository, then the path must be relative to the root of
+   the remote context. In all cases, the file must be within the build context.
+   The default is *Dockerfile*.
 
 **--force-rm**=*true*|*false*
    Always remove intermediate containers, even after unsuccessful builds. The default is *false*.
@@ -209,6 +214,17 @@ repository.
 
 Note: You can set an arbitrary Git repository via the `git://` schema.
 
+## Building an image using a URL to a tarballed context
+
+This will send the URL itself to the Docker daemon. The daemon will fetch the
+tarball archive, decompress it and use its contents as the build context. If you
+pass an *-f PATH/Dockerfile* option as well, the system will look for that file
+inside the contents of the tarball.
+
+    docker build -f dev/Dockerfile https://10.10.10.1/docker/context.tar.gz
+
+Note: supported compression formats are 'xz', 'bzip2', 'gzip' and 'identity' (no compression).
+
 # HISTORY
 March 2014, Originally compiled by William Henry (whenry at redhat dot com)
 based on docker.com source material and internal work.

+ 30 - 0
pkg/httputils/mimetype.go

@@ -0,0 +1,30 @@
+package httputils
+
+import (
+	"mime"
+	"net/http"
+)
+
+var MimeTypes = struct {
+	TextPlain   string
+	Tar         string
+	OctetStream string
+}{"text/plain", "application/tar", "application/octet-stream"}
+
+// DetectContentType returns a best guess representation of the MIME
+// content type for the bytes at c.  The value detected by
+// http.DetectContentType is guaranteed not be nil, defaulting to
+// application/octet-stream when a better guess cannot be made. The
+// result of this detection is then run through mime.ParseMediaType()
+// which separates it from any parameters.
+// Note that calling this function does not advance the Reader at r
+func DetectContentType(c []byte) (string, map[string]string, error) {
+
+	ct := http.DetectContentType(c)
+	contentType, args, err := mime.ParseMediaType(ct)
+	if err != nil {
+		return "", nil, err
+	}
+
+	return contentType, args, nil
+}
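
A brief usage sketch for the new helper, assuming it is imported from `github.com/docker/docker/pkg/httputils`; the expected outputs follow from Go's standard `http.DetectContentType` sniffing rules:

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/httputils"
    )

    func main() {
        // Plain-text bytes (e.g. a Dockerfile) come back as text/plain, with
        // the charset parameter split off into the args map.
        ct, args, err := httputils.DetectContentType([]byte("FROM busybox\n"))
        if err != nil {
            panic(err)
        }
        fmt.Println(ct, args) // text/plain map[charset:utf-8]

        // gzip magic bytes are detected as application/x-gzip.
        ct, _, err = httputils.DetectContentType([]byte{0x1F, 0x8B, 0x08, 0x00})
        if err != nil {
            panic(err)
        }
        fmt.Println(ct) // application/x-gzip
    }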