
builder: fix references to jobs in daemon, make builder a first class
package referring to evaluator

Docker-DCO-1.1-Signed-off-by: Erik Hollensbe <github@hollensbe.org> (github: erikh)

Erik Hollensbe 11 years ago
parent
commit
1ae4c00a19
9 changed files with 309 additions and 1184 deletions
  1. builder/builder.go (+34 -0)
  2. builder/evaluator/dispatchers.go (+53 -53)
  3. builder/evaluator/evaluator.go (+31 -57)
  4. builder/evaluator/internals.go (+65 -65)
  5. builder/evaluator/support.go (+2 -2)
  6. builder/job.go (+119 -0)
  7. daemon/build.go (+0 -1006)
  8. daemon/daemon.go (+0 -1)
  9. docker/daemon.go (+5 -0)
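
The heart of the change is that the engine job behind `docker build` moves out of the daemon package and into the new top-level builder package (builder/job.go below), while per-instruction evaluation stays in builder/evaluator. Callers keep driving builds through the engine job named "build"; only the job's owner changes. A minimal caller-side sketch, assuming the engine.Job API behaves as it is used elsewhere in this diff (the function name and tag are made up for illustration):

    package example // illustration only, not part of this commit

    import (
    	"io"

    	"github.com/docker/docker/engine"
    )

    // runBuild streams a tar context into the "build" job, which after this
    // commit is registered by builder.BuilderJob rather than daemon.CmdBuild.
    func runBuild(eng *engine.Engine, contextTar io.Reader, out io.Writer) error {
    	job := eng.Job("build")
    	job.Setenv("t", "example/app") // hypothetical repo:tag
    	job.SetenvBool("rm", true)     // remove intermediate containers
    	job.Stdin.Add(contextTar)      // the build context as a tar stream
    	job.Stdout.Add(out)
    	return job.Run()
    }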

+ 34 - 0
builder/builder.go

@@ -0,0 +1,34 @@
+package builder
+
+import (
+	"github.com/docker/docker/builder/evaluator"
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/runconfig"
+)
+
+// Create a new builder.
+func NewBuilder(opts *evaluator.BuildOpts) *evaluator.BuildFile {
+	return &evaluator.BuildFile{
+		Dockerfile:    nil,
+		Env:           evaluator.EnvMap{},
+		Config:        initRunConfig(),
+		Options:       opts,
+		TmpContainers: evaluator.UniqueMap{},
+		TmpImages:     evaluator.UniqueMap{},
+	}
+}
+
+func initRunConfig() *runconfig.Config {
+	return &runconfig.Config{
+		PortSpecs: []string{},
+		// FIXME(erikh) this should be a type that lives in runconfig
+		ExposedPorts: map[nat.Port]struct{}{},
+		Env:          []string{},
+		Cmd:          []string{},
+
+		// FIXME(erikh) this should also be a type in runconfig
+		Volumes:    map[string]struct{}{},
+		Entrypoint: []string{"/bin/sh", "-c"},
+		OnBuild:    []string{},
+	}
+}
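
NewBuilder and initRunConfig move here (lightly adapted) from the evaluator, where they are deleted below, so constructing a builder is now a one-liner against the exported BuildFile. A hedged sketch of direct use, assuming OutStream/ErrStream/OutOld accept any io.Writer and using only BuildOpts fields that appear in builder/job.go:

    package example // illustration only, not part of this commit

    import (
    	"io"
    	"os"

    	"github.com/docker/docker/builder"
    	"github.com/docker/docker/builder/evaluator"
    	"github.com/docker/docker/daemon"
    	"github.com/docker/docker/engine"
    	"github.com/docker/docker/registry"
    	"github.com/docker/docker/utils"
    )

    // buildFromTar runs a build directly against the evaluator, bypassing the
    // engine job; field usage mirrors CmdBuild in builder/job.go.
    func buildFromTar(d *daemon.Daemon, eng *engine.Engine, context io.Reader) (string, error) {
    	sf := utils.NewStreamFormatter(false)
    	opts := &evaluator.BuildOpts{
    		Daemon:          d,
    		Engine:          eng,
    		OutStream:       os.Stdout,
    		ErrStream:       os.Stderr,
    		Verbose:         true,
    		UtilizeCache:    true,
    		OutOld:          os.Stdout,
    		StreamFormatter: sf,
    		AuthConfig:      &registry.AuthConfig{},
    		AuthConfigFile:  &registry.ConfigFile{},
    	}
    	return builder.NewBuilder(opts).Run(context) // returns the final image ID
    }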

+ 53 - 53
builder/evaluator/dispatchers.go

@@ -18,7 +18,7 @@ import (
 )
 
 // dispatch with no layer / parsing. This is effectively not a command.
-func nullDispatch(b *buildFile, args []string) error {
+func nullDispatch(b *BuildFile, args []string) error {
 	return nil
 }
 
@@ -27,7 +27,7 @@ func nullDispatch(b *buildFile, args []string) error {
 // Sets the environment variable foo to bar, also makes interpolation
 // in the dockerfile available from the next statement on via ${foo}.
 //
-func env(b *buildFile, args []string) error {
+func env(b *BuildFile, args []string) error {
 	if len(args) != 2 {
 		return fmt.Errorf("ENV accepts two arguments")
 	}
@@ -35,22 +35,22 @@ func env(b *buildFile, args []string) error {
 	// the duplication here is intended to ease the replaceEnv() call's env
 	// handling. This routine gets much shorter with the denormalization here.
 	key := args[0]
-	b.env[key] = args[1]
-	b.config.Env = append(b.config.Env, strings.Join([]string{key, b.env[key]}, "="))
+	b.Env[key] = args[1]
+	b.Config.Env = append(b.Config.Env, strings.Join([]string{key, b.Env[key]}, "="))
 
-	return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s=%s", key, b.env[key]))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("ENV %s=%s", key, b.Env[key]))
 }
 
 // MAINTAINER some text <maybe@an.email.address>
 //
 // Sets the maintainer metadata.
-func maintainer(b *buildFile, args []string) error {
+func maintainer(b *BuildFile, args []string) error {
 	if len(args) != 1 {
 		return fmt.Errorf("MAINTAINER requires only one argument")
 	}
 
 	b.maintainer = args[0]
-	return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
 }
 
 // ADD foo /path
@@ -58,7 +58,7 @@ func maintainer(b *buildFile, args []string) error {
 // Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
 // exist here. If you do not wish to have this automatic handling, use COPY.
 //
-func add(b *buildFile, args []string) error {
+func add(b *BuildFile, args []string) error {
 	if len(args) != 2 {
 		return fmt.Errorf("ADD requires two arguments")
 	}
@@ -70,7 +70,7 @@ func add(b *buildFile, args []string) error {
 //
 // Same as 'ADD' but without the tar and remote url handling.
 //
-func dispatchCopy(b *buildFile, args []string) error {
+func dispatchCopy(b *BuildFile, args []string) error {
 	if len(args) != 2 {
 		return fmt.Errorf("COPY requires two arguments")
 	}
@@ -82,16 +82,16 @@ func dispatchCopy(b *buildFile, args []string) error {
 //
 // This sets the image the dockerfile will build on top of.
 //
-func from(b *buildFile, args []string) error {
+func from(b *BuildFile, args []string) error {
 	if len(args) != 1 {
 		return fmt.Errorf("FROM requires one argument")
 	}
 
 	name := args[0]
 
-	image, err := b.options.Daemon.Repositories().LookupImage(name)
+	image, err := b.Options.Daemon.Repositories().LookupImage(name)
 	if err != nil {
-		if b.options.Daemon.Graph().IsNotExist(err) {
+		if b.Options.Daemon.Graph().IsNotExist(err) {
 			image, err = b.pullImage(name)
 		}
 
@@ -114,7 +114,7 @@ func from(b *buildFile, args []string) error {
 // special cases. search for 'OnBuild' in internals.go for additional special
 // cases.
 //
-func onbuild(b *buildFile, args []string) error {
+func onbuild(b *BuildFile, args []string) error {
 	triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
 	switch triggerInstruction {
 	case "ONBUILD":
@@ -125,15 +125,15 @@ func onbuild(b *buildFile, args []string) error {
 
 	trigger := strings.Join(args, " ")
 
-	b.config.OnBuild = append(b.config.OnBuild, trigger)
-	return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
+	b.Config.OnBuild = append(b.Config.OnBuild, trigger)
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
 }
 
 // WORKDIR /tmp
 //
 // Set the working directory for future RUN/CMD/etc statements.
 //
-func workdir(b *buildFile, args []string) error {
+func workdir(b *BuildFile, args []string) error {
 	if len(args) != 1 {
 		return fmt.Errorf("WORKDIR requires exactly one argument")
 	}
@@ -141,15 +141,15 @@ func workdir(b *buildFile, args []string) error {
 	workdir := args[0]
 
 	if workdir[0] == '/' {
-		b.config.WorkingDir = workdir
+		b.Config.WorkingDir = workdir
 	} else {
-		if b.config.WorkingDir == "" {
-			b.config.WorkingDir = "/"
+		if b.Config.WorkingDir == "" {
+			b.Config.WorkingDir = "/"
 		}
-		b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir)
+		b.Config.WorkingDir = filepath.Join(b.Config.WorkingDir, workdir)
 	}
 
-	return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
 }
 
 // RUN some command yo
@@ -161,7 +161,7 @@ func workdir(b *buildFile, args []string) error {
 // RUN echo hi          # sh -c echo hi
 // RUN [ "echo", "hi" ] # echo hi
 //
-func run(b *buildFile, args []string) error {
+func run(b *BuildFile, args []string) error {
 	if len(args) == 1 { // literal string command, not an exec array
 		args = append([]string{"/bin/sh", "-c"}, args[0])
 	}
@@ -175,14 +175,14 @@ func run(b *buildFile, args []string) error {
 		return err
 	}
 
-	cmd := b.config.Cmd
+	cmd := b.Config.Cmd
 	// set Cmd manually, this is special case only for Dockerfiles
-	b.config.Cmd = config.Cmd
-	runconfig.Merge(b.config, config)
+	b.Config.Cmd = config.Cmd
+	runconfig.Merge(b.Config, config)
 
-	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
+	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
 
-	utils.Debugf("Command to be executed: %v", b.config.Cmd)
+	utils.Debugf("Command to be executed: %v", b.Config.Cmd)
 
 	hit, err := b.probeCache()
 	if err != nil {
@@ -217,13 +217,13 @@ func run(b *buildFile, args []string) error {
 // Set the default command to run in the container (which may be empty).
 // Argument handling is the same as RUN.
 //
-func cmd(b *buildFile, args []string) error {
+func cmd(b *BuildFile, args []string) error {
 	if len(args) < 2 {
 		args = append([]string{"/bin/sh", "-c"}, args...)
 	}
 
-	b.config.Cmd = args
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
+	b.Config.Cmd = args
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
 		return err
 	}
 
@@ -236,17 +236,17 @@ func cmd(b *buildFile, args []string) error {
 // Set the entrypoint (which defaults to sh -c) to /usr/sbin/nginx. Will
 // accept the CMD as the arguments to /usr/sbin/nginx.
 //
-// Handles command processing similar to CMD and RUN, only b.config.Entrypoint
+// Handles command processing similar to CMD and RUN, only b.Config.Entrypoint
 // is initialized at NewBuilder time instead of through argument parsing.
 //
-func entrypoint(b *buildFile, args []string) error {
-	b.config.Entrypoint = args
+func entrypoint(b *BuildFile, args []string) error {
+	b.Config.Entrypoint = args
 
 	// if there is no cmd in current Dockerfile - cleanup cmd
 	if !b.cmdSet {
-		b.config.Cmd = nil
+		b.Config.Cmd = nil
 	}
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
 		return err
 	}
 	return nil
@@ -255,28 +255,28 @@ func entrypoint(b *buildFile, args []string) error {
 // EXPOSE 6667/tcp 7000/tcp
 //
 // Expose ports for links and port mappings. This all ends up in
-// b.config.ExposedPorts for runconfig.
+// b.Config.ExposedPorts for runconfig.
 //
-func expose(b *buildFile, args []string) error {
+func expose(b *BuildFile, args []string) error {
 	portsTab := args
 
-	if b.config.ExposedPorts == nil {
-		b.config.ExposedPorts = make(nat.PortSet)
+	if b.Config.ExposedPorts == nil {
+		b.Config.ExposedPorts = make(nat.PortSet)
 	}
 
-	ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...))
+	ports, _, err := nat.ParsePortSpecs(append(portsTab, b.Config.PortSpecs...))
 	if err != nil {
 		return err
 	}
 
 	for port := range ports {
-		if _, exists := b.config.ExposedPorts[port]; !exists {
-			b.config.ExposedPorts[port] = struct{}{}
+		if _, exists := b.Config.ExposedPorts[port]; !exists {
+			b.Config.ExposedPorts[port] = struct{}{}
 		}
 	}
-	b.config.PortSpecs = nil
+	b.Config.PortSpecs = nil
 
-	return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
 }
 
 // USER foo
@@ -284,13 +284,13 @@ func expose(b *buildFile, args []string) error {
 // Set the user to 'foo' for future commands and when running the
 // ENTRYPOINT/CMD at container run time.
 //
-func user(b *buildFile, args []string) error {
+func user(b *BuildFile, args []string) error {
 	if len(args) != 1 {
 		return fmt.Errorf("USER requires exactly one argument")
 	}
 
-	b.config.User = args[0]
-	return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args))
+	b.Config.User = args[0]
+	return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
 }
 
 // VOLUME /foo
@@ -298,26 +298,26 @@ func user(b *buildFile, args []string) error {
 // Expose the volume /foo for use. Will also accept the JSON form, but either
 // way requires exactly one argument.
 //
-func volume(b *buildFile, args []string) error {
+func volume(b *BuildFile, args []string) error {
 	if len(args) != 1 {
 		return fmt.Errorf("Volume cannot be empty")
 	}
 
 	volume := args
 
-	if b.config.Volumes == nil {
-		b.config.Volumes = map[string]struct{}{}
+	if b.Config.Volumes == nil {
+		b.Config.Volumes = map[string]struct{}{}
 	}
 	for _, v := range volume {
-		b.config.Volumes[v] = struct{}{}
+		b.Config.Volumes[v] = struct{}{}
 	}
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
+	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
 		return err
 	}
 	return nil
 }
 
 // INSERT is no longer accepted, but we still parse it.
-func insert(b *buildFile, args []string) error {
+func insert(b *BuildFile, args []string) error {
 	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
 }
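
With every handler now taking the exported *BuildFile, the dispatch table in evaluator.go and the handlers here share one public shape, so other packages could in principle supply handlers too. A hypothetical example of what a new instruction would look like under this scheme (the NOOP instruction and its handler are invented for illustration and are not part of this commit):

    // in builder/evaluator/dispatchers.go (hypothetical):
    func noop(b *BuildFile, args []string) error {
    	// a real handler would adjust b.Config here before committing a layer,
    	// exactly as env(), workdir() and the others above do
    	return b.commit("", b.Config.Cmd, fmt.Sprintf("NOOP %v", args))
    }

    // ...plus one extra entry in evaluateTable in evaluator.go (hypothetical):
    //   "noop": noop,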

+ 31 - 57
builder/evaluator/evaluator.go

@@ -32,21 +32,23 @@ import (
 	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/engine"
-	"github.com/docker/docker/nat"
 	"github.com/docker/docker/pkg/tarsum"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
 )
 
+type EnvMap map[string]string
+type UniqueMap map[string]struct{}
+
 var (
 	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
 )
 
-var evaluateTable map[string]func(*buildFile, []string) error
+var evaluateTable map[string]func(*BuildFile, []string) error
 
 func init() {
-	evaluateTable = map[string]func(*buildFile, []string) error{
+	evaluateTable = map[string]func(*BuildFile, []string) error{
 		"env":            env,
 		"maintainer":     maintainer,
 		"add":            add,
@@ -65,25 +67,24 @@ func init() {
 	}
 }
 
-type envMap map[string]string
-type uniqueMap map[string]struct{}
-
 // internal struct, used to maintain configuration of the Dockerfile's
 // processing as it evaluates the parsing result.
-type buildFile struct {
-	dockerfile  *parser.Node      // the syntax tree of the dockerfile
-	env         envMap            // map of environment variables
-	image       string            // image name for commit processing
-	config      *runconfig.Config // runconfig for cmd, run, entrypoint etc.
-	options     *BuildOpts        // see below
-	maintainer  string            // maintainer name. could probably be removed.
-	cmdSet      bool              // indicates is CMD was set in current Dockerfile
-	context     *tarsum.TarSum    // the context is a tarball that is uploaded by the client
-	contextPath string            // the path of the temporary directory the local context is unpacked to (server side)
+type BuildFile struct {
+	Dockerfile *parser.Node      // the syntax tree of the dockerfile
+	Env        EnvMap            // map of environment variables
+	Config     *runconfig.Config // runconfig for cmd, run, entrypoint etc.
+	Options    *BuildOpts        // see below
 
 	// both of these are controlled by the Remove and ForceRemove options in BuildOpts
-	tmpContainers uniqueMap // a map of containers used for removes
-	tmpImages     uniqueMap // a map of images used for removes
+	TmpContainers UniqueMap // a map of containers used for removes
+	TmpImages     UniqueMap // a map of images used for removes
+
+	image       string         // image name for commit processing
+	maintainer  string         // maintainer name. could probably be removed.
+	cmdSet      bool           // indicates is CMD was set in current Dockerfile
+	context     *tarsum.TarSum // the context is a tarball that is uploaded by the client
+	contextPath string         // the path of the temporary directory the local context is unpacked to (server side)
+
 }
 
 type BuildOpts struct {
@@ -110,18 +111,6 @@ type BuildOpts struct {
 	StreamFormatter *utils.StreamFormatter
 }
 
-// Create a new builder.
-func NewBuilder(opts *BuildOpts) (*buildFile, error) {
-	return &buildFile{
-		dockerfile:    nil,
-		env:           envMap{},
-		config:        initRunConfig(),
-		options:       opts,
-		tmpContainers: make(uniqueMap),
-		tmpImages:     make(uniqueMap),
-	}, nil
-}
-
 // Run the builder with the context. This is the lynchpin of this package. This
 // will (barring errors):
 //
@@ -134,7 +123,7 @@ func NewBuilder(opts *BuildOpts) (*buildFile, error) {
 //   processing.
 // * Print a happy message and return the image ID.
 //
-func (b *buildFile) Run(context io.Reader) (string, error) {
+func (b *BuildFile) Run(context io.Reader) (string, error) {
 	if err := b.readContext(context); err != nil {
 		return "", err
 	}
@@ -155,16 +144,16 @@ func (b *buildFile) Run(context io.Reader) (string, error) {
 		return "", err
 	}
 
-	b.dockerfile = ast
+	b.Dockerfile = ast
 
-	for i, n := range b.dockerfile.Children {
+	for i, n := range b.Dockerfile.Children {
 		if err := b.dispatch(i, n); err != nil {
-			if b.options.ForceRemove {
-				b.clearTmp(b.tmpContainers)
+			if b.Options.ForceRemove {
+				b.clearTmp(b.TmpContainers)
 			}
 			return "", err
-		} else if b.options.Remove {
-			b.clearTmp(b.tmpContainers)
+		} else if b.Options.Remove {
+			b.clearTmp(b.TmpContainers)
 		}
 	}
 
@@ -172,32 +161,17 @@ func (b *buildFile) Run(context io.Reader) (string, error) {
 		return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?\n")
 	}
 
-	fmt.Fprintf(b.options.OutStream, "Successfully built %s\n", utils.TruncateID(b.image))
+	fmt.Fprintf(b.Options.OutStream, "Successfully built %s\n", utils.TruncateID(b.image))
 	return b.image, nil
 }
 
-func initRunConfig() *runconfig.Config {
-	return &runconfig.Config{
-		PortSpecs: []string{},
-		// FIXME(erikh) this should be a type that lives in runconfig
-		ExposedPorts: map[nat.Port]struct{}{},
-		Env:          []string{},
-		Cmd:          []string{},
-
-		// FIXME(erikh) this should also be a type in runconfig
-		Volumes:    map[string]struct{}{},
-		Entrypoint: []string{"/bin/sh", "-c"},
-		OnBuild:    []string{},
-	}
-}
-
 // This method is the entrypoint to all statement handling routines.
 //
 // Almost all nodes will have this structure:
 // Child[Node, Node, Node] where Child is from parser.Node.Children and each
 // node comes from parser.Node.Next. This forms a "line" with a statement and
 // arguments and we process them in this normalized form by hitting
-// evaluateTable with the leaf nodes of the command and the buildFile object.
+// evaluateTable with the leaf nodes of the command and the BuildFile object.
 //
 // ONBUILD is a special case; in this case the parser will emit:
 // Child[Node, Child[Node, Node...]] where the first node is the literal
@@ -205,12 +179,12 @@ func initRunConfig() *runconfig.Config {
 // such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
 // deal with that, at least until it becomes more of a general concern with new
 // features.
-func (b *buildFile) dispatch(stepN int, ast *parser.Node) error {
+func (b *BuildFile) dispatch(stepN int, ast *parser.Node) error {
 	cmd := ast.Value
 	strs := []string{}
 
 	if cmd == "onbuild" {
-		fmt.Fprintf(b.options.OutStream, "%#v\n", ast.Next.Children[0].Value)
+		fmt.Fprintf(b.Options.OutStream, "%#v\n", ast.Next.Children[0].Value)
 		ast = ast.Next.Children[0]
 		strs = append(strs, ast.Value)
 	}
@@ -220,7 +194,7 @@ func (b *buildFile) dispatch(stepN int, ast *parser.Node) error {
 		strs = append(strs, replaceEnv(b, ast.Value))
 	}
 
-	fmt.Fprintf(b.options.OutStream, "Step %d : %s %s\n", stepN, strings.ToUpper(cmd), strings.Join(strs, " "))
+	fmt.Fprintf(b.Options.OutStream, "Step %d : %s %s\n", stepN, strings.ToUpper(cmd), strings.Join(strs, " "))
 
 	// XXX yes, we skip any cmds that are not valid; the parser should have
 	// picked these out already.
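
Run() hands each top-level node of the parsed Dockerfile to dispatch(), which expects the Child[Node, Node, ...] shape described in the comment above. A standalone sketch of that traversal, assuming builder/parser exposes Parse(io.Reader) (*Node, error) with the Value/Next/Children fields used in this file:

    package example // illustration only, not part of this commit

    import (
    	"fmt"
    	"strings"

    	"github.com/docker/docker/builder/parser"
    )

    // printSteps walks a parsed Dockerfile the same way BuildFile.Run does,
    // printing each instruction with its flattened arguments.
    func printSteps(dockerfile string) error {
    	ast, err := parser.Parse(strings.NewReader(dockerfile))
    	if err != nil {
    		return err
    	}
    	for i, n := range ast.Children {
    		args := []string{}
    		for cur := n.Next; cur != nil; cur = cur.Next {
    			args = append(args, cur.Value)
    		}
    		fmt.Printf("Step %d : %s %s\n", i, strings.ToUpper(n.Value), strings.Join(args, " "))
    	}
    	return nil
    }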

+ 65 - 65
builder/evaluator/internals.go

@@ -30,7 +30,7 @@ import (
 	"github.com/docker/docker/utils"
 )
 
-func (b *buildFile) readContext(context io.Reader) error {
+func (b *BuildFile) readContext(context io.Reader) error {
 	tmpdirPath, err := ioutil.TempDir("", "docker-build")
 	if err != nil {
 		return err
@@ -50,15 +50,15 @@ func (b *buildFile) readContext(context io.Reader) error {
 	return nil
 }
 
-func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
+func (b *BuildFile) commit(id string, autoCmd []string, comment string) error {
 	if b.image == "" {
 		return fmt.Errorf("Please provide a source image with `from` prior to commit")
 	}
-	b.config.Image = b.image
+	b.Config.Image = b.image
 	if id == "" {
-		cmd := b.config.Cmd
-		b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
-		defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
+		cmd := b.Config.Cmd
+		b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
+		defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
 
 		hit, err := b.probeCache()
 		if err != nil {
@@ -68,15 +68,15 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 			return nil
 		}
 
-		container, warnings, err := b.options.Daemon.Create(b.config, "")
+		container, warnings, err := b.Options.Daemon.Create(b.Config, "")
 		if err != nil {
 			return err
 		}
 		for _, warning := range warnings {
-			fmt.Fprintf(b.options.OutStream, " ---> [Warning] %s\n", warning)
+			fmt.Fprintf(b.Options.OutStream, " ---> [Warning] %s\n", warning)
 		}
-		b.tmpContainers[container.ID] = struct{}{}
-		fmt.Fprintf(b.options.OutStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
+		b.TmpContainers[container.ID] = struct{}{}
+		fmt.Fprintf(b.Options.OutStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
 		id = container.ID
 
 		if err := container.Mount(); err != nil {
@@ -84,25 +84,25 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 		}
 		defer container.Unmount()
 	}
-	container := b.options.Daemon.Get(id)
+	container := b.Options.Daemon.Get(id)
 	if container == nil {
 		return fmt.Errorf("An error occured while creating the container")
 	}
 
 	// Note: Actually copy the struct
-	autoConfig := *b.config
+	autoConfig := *b.Config
 	autoConfig.Cmd = autoCmd
 	// Commit the container
-	image, err := b.options.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
+	image, err := b.Options.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
 	if err != nil {
 		return err
 	}
-	b.tmpImages[image.ID] = struct{}{}
+	b.TmpImages[image.ID] = struct{}{}
 	b.image = image.ID
 	return nil
 }
 
-func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
+func (b *BuildFile) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
 	if b.context == nil {
 		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
 	}
@@ -114,10 +114,10 @@ func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDeco
 	orig := args[0]
 	dest := args[1]
 
-	cmd := b.config.Cmd
-	b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)}
-	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
-	b.config.Image = b.image
+	cmd := b.Config.Cmd
+	b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)}
+	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
+	b.Config.Image = b.image
 
 	var (
 		origPath   = orig
@@ -201,7 +201,7 @@ func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDeco
 	}
 
 	// Hash path and check the cache
-	if b.options.UtilizeCache {
+	if b.Options.UtilizeCache {
 		var (
 			hash string
 			sums = b.context.GetSums()
@@ -233,7 +233,7 @@ func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDeco
 				hash = "file:" + h
 			}
 		}
-		b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)}
+		b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)}
 		hit, err := b.probeCache()
 		if err != nil {
 			return err
@@ -245,11 +245,11 @@ func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDeco
 	}
 
 	// Create the container
-	container, _, err := b.options.Daemon.Create(b.config, "")
+	container, _, err := b.Options.Daemon.Create(b.Config, "")
 	if err != nil {
 		return err
 	}
-	b.tmpContainers[container.ID] = struct{}{}
+	b.TmpContainers[container.ID] = struct{}{}
 
 	if err := container.Mount(); err != nil {
 		return err
@@ -269,27 +269,27 @@ func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDeco
 	return nil
 }
 
-func (b *buildFile) pullImage(name string) (*imagepkg.Image, error) {
+func (b *BuildFile) pullImage(name string) (*imagepkg.Image, error) {
 	remote, tag := parsers.ParseRepositoryTag(name)
-	pullRegistryAuth := b.options.AuthConfig
-	if len(b.options.AuthConfigFile.Configs) > 0 {
+	pullRegistryAuth := b.Options.AuthConfig
+	if len(b.Options.AuthConfigFile.Configs) > 0 {
 		// The request came with a full auth config file, we prefer to use that
 		endpoint, _, err := registry.ResolveRepositoryName(remote)
 		if err != nil {
 			return nil, err
 		}
-		resolvedAuth := b.options.AuthConfigFile.ResolveAuthConfig(endpoint)
+		resolvedAuth := b.Options.AuthConfigFile.ResolveAuthConfig(endpoint)
 		pullRegistryAuth = &resolvedAuth
 	}
-	job := b.options.Engine.Job("pull", remote, tag)
-	job.SetenvBool("json", b.options.StreamFormatter.Json())
+	job := b.Options.Engine.Job("pull", remote, tag)
+	job.SetenvBool("json", b.Options.StreamFormatter.Json())
 	job.SetenvBool("parallel", true)
 	job.SetenvJson("authConfig", pullRegistryAuth)
-	job.Stdout.Add(b.options.OutOld)
+	job.Stdout.Add(b.Options.OutOld)
 	if err := job.Run(); err != nil {
 		return nil, err
 	}
-	image, err := b.options.Daemon.Repositories().LookupImage(name)
+	image, err := b.Options.Daemon.Repositories().LookupImage(name)
 	if err != nil {
 		return nil, err
 	}
@@ -297,23 +297,23 @@ func (b *buildFile) pullImage(name string) (*imagepkg.Image, error) {
 	return image, nil
 }
 
-func (b *buildFile) processImageFrom(img *imagepkg.Image) error {
+func (b *BuildFile) processImageFrom(img *imagepkg.Image) error {
 	b.image = img.ID
-	b.config = &runconfig.Config{}
+	b.Config = &runconfig.Config{}
 	if img.Config != nil {
-		b.config = img.Config
+		b.Config = img.Config
 	}
-	if b.config.Env == nil || len(b.config.Env) == 0 {
-		b.config.Env = append(b.config.Env, "PATH="+daemon.DefaultPathEnv)
+	if b.Config.Env == nil || len(b.Config.Env) == 0 {
+		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
 	}
 	// Process ONBUILD triggers if they exist
-	if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
-		fmt.Fprintf(b.options.ErrStream, "# Executing %d build triggers\n", nTriggers)
+	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
+		fmt.Fprintf(b.Options.ErrStream, "# Executing %d build triggers\n", nTriggers)
 	}
 
 	// Copy the ONBUILD triggers, and remove them from the config, since the config will be commited.
-	onBuildTriggers := b.config.OnBuild
-	b.config.OnBuild = []string{}
+	onBuildTriggers := b.Config.OnBuild
+	b.Config.OnBuild = []string{}
 
 	// FIXME rewrite this so that builder/parser is used; right now steps in
 	// onbuild are muted because we have no good way to represent the step
@@ -343,17 +343,17 @@ func (b *buildFile) processImageFrom(img *imagepkg.Image) error {
 	return nil
 }
 
-// probeCache checks to see if image-caching is enabled (`b.options.UtilizeCache`)
-// and if so attempts to look up the current `b.image` and `b.config` pair
-// in the current server `b.options.Daemon`. If an image is found, probeCache returns
+// probeCache checks to see if image-caching is enabled (`b.Options.UtilizeCache`)
+// and if so attempts to look up the current `b.image` and `b.Config` pair
+// in the current server `b.Options.Daemon`. If an image is found, probeCache returns
 // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
 // is any error, it returns `(false, err)`.
-func (b *buildFile) probeCache() (bool, error) {
-	if b.options.UtilizeCache {
-		if cache, err := b.options.Daemon.ImageGetCached(b.image, b.config); err != nil {
+func (b *BuildFile) probeCache() (bool, error) {
+	if b.Options.UtilizeCache {
+		if cache, err := b.Options.Daemon.ImageGetCached(b.image, b.Config); err != nil {
 			return false, err
 		} else if cache != nil {
-			fmt.Fprintf(b.options.OutStream, " ---> Using cache\n")
+			fmt.Fprintf(b.Options.OutStream, " ---> Using cache\n")
 			utils.Debugf("[BUILDER] Use cached version")
 			b.image = cache.ID
 			return true, nil
@@ -364,37 +364,37 @@ func (b *buildFile) probeCache() (bool, error) {
 	return false, nil
 }
 
-func (b *buildFile) create() (*daemon.Container, error) {
+func (b *BuildFile) create() (*daemon.Container, error) {
 	if b.image == "" {
 		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
 	}
-	b.config.Image = b.image
+	b.Config.Image = b.image
 
 	// Create the container
-	c, _, err := b.options.Daemon.Create(b.config, "")
+	c, _, err := b.Options.Daemon.Create(b.Config, "")
 	if err != nil {
 		return nil, err
 	}
-	b.tmpContainers[c.ID] = struct{}{}
-	fmt.Fprintf(b.options.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
+	b.TmpContainers[c.ID] = struct{}{}
+	fmt.Fprintf(b.Options.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
 
 	// override the entry point that may have been picked up from the base image
-	c.Path = b.config.Cmd[0]
-	c.Args = b.config.Cmd[1:]
+	c.Path = b.Config.Cmd[0]
+	c.Args = b.Config.Cmd[1:]
 
 	return c, nil
 }
 
-func (b *buildFile) run(c *daemon.Container) error {
+func (b *BuildFile) run(c *daemon.Container) error {
 	var errCh chan error
-	if b.options.Verbose {
+	if b.Options.Verbose {
 		errCh = utils.Go(func() error {
 			// FIXME: call the 'attach' job so that daemon.Attach can be made private
 			//
 			// FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach
 			// but without hijacking for stdin. Also, with attach there can be race
 			// condition because of some output already was printed before it.
-			return <-b.options.Daemon.Attach(c, nil, nil, b.options.OutStream, b.options.ErrStream)
+			return <-b.Options.Daemon.Attach(c, nil, nil, b.Options.OutStream, b.Options.ErrStream)
 		})
 	}
 
@@ -412,7 +412,7 @@ func (b *buildFile) run(c *daemon.Container) error {
 	// Wait for it to finish
 	if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 {
 		err := &utils.JSONError{
-			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
+			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
 			Code:    ret,
 		}
 		return err
@@ -421,7 +421,7 @@ func (b *buildFile) run(c *daemon.Container) error {
 	return nil
 }
 
-func (b *buildFile) checkPathForAddition(orig string) error {
+func (b *BuildFile) checkPathForAddition(orig string) error {
 	origPath := path.Join(b.contextPath, orig)
 	if p, err := filepath.EvalSymlinks(origPath); err != nil {
 		if os.IsNotExist(err) {
@@ -444,7 +444,7 @@ func (b *buildFile) checkPathForAddition(orig string) error {
 	return nil
 }
 
-func (b *buildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
+func (b *BuildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
 	var (
 		err        error
 		destExists = true
@@ -549,14 +549,14 @@ func fixPermissions(destination string, uid, gid int) error {
 	})
 }
 
-func (b *buildFile) clearTmp(containers map[string]struct{}) {
+func (b *BuildFile) clearTmp(containers map[string]struct{}) {
 	for c := range containers {
-		tmp := b.options.Daemon.Get(c)
-		if err := b.options.Daemon.Destroy(tmp); err != nil {
-			fmt.Fprintf(b.options.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
+		tmp := b.Options.Daemon.Get(c)
+		if err := b.Options.Daemon.Destroy(tmp); err != nil {
+			fmt.Fprintf(b.Options.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
 		} else {
 			delete(containers, c)
-			fmt.Fprintf(b.options.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
+			fmt.Fprintf(b.Options.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
 		}
 	}
 }

+ 2 - 2
builder/evaluator/support.go

@@ -10,12 +10,12 @@ var (
 )
 
 // handle environment replacement. Used in dispatcher.
-func replaceEnv(b *buildFile, str string) string {
+func replaceEnv(b *BuildFile, str string) string {
 	for _, match := range TOKEN_ENV_INTERPOLATION.FindAllString(str, -1) {
 		match = match[strings.Index(match, "$"):]
 		matchKey := strings.Trim(match, "${}")
 
-		for envKey, envValue := range b.env {
+		for envKey, envValue := range b.Env {
 			if matchKey == envKey {
 				str = strings.Replace(str, match, envValue, -1)
 			}
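
replaceEnv only substitutes $foo / ${foo} tokens whose key was previously stored by the ENV handler in b.Env; unknown keys are left alone. A standalone illustration of the same substitution, reusing the interpolation regexp that the deleted daemon/build.go used (TOKEN_ENV_INTERPOLATION itself is declared in a part of support.go outside this hunk):

    package example // illustration only, not part of this commit

    import (
    	"regexp"
    	"strings"
    )

    var tokenEnvInterpolation = regexp.MustCompile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)")

    // interpolate mimics replaceEnv: every $KEY or ${KEY} whose KEY exists in
    // env is replaced with its value; anything else passes through untouched.
    func interpolate(str string, env map[string]string) string {
    	for _, match := range tokenEnvInterpolation.FindAllString(str, -1) {
    		match = match[strings.Index(match, "$"):]
    		matchKey := strings.Trim(match, "${}")
    		if value, ok := env[matchKey]; ok {
    			str = strings.Replace(str, match, value, -1)
    		}
    	}
    	return str
    }

For example, interpolate("WORKDIR ${HOME}/src", map[string]string{"HOME": "/root"}) yields "WORKDIR /root/src", which is how a later WORKDIR line can see a value set by an earlier ENV line.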

+ 119 - 0
builder/job.go

@@ -0,0 +1,119 @@
+package builder
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"strings"
+
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/builder/evaluator"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/engine"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/utils"
+)
+
+type BuilderJob struct {
+	Engine *engine.Engine
+	Daemon *daemon.Daemon
+}
+
+func (b *BuilderJob) Install() {
+	b.Engine.Register("build", b.CmdBuild)
+}
+
+func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
+	if len(job.Args) != 0 {
+		return job.Errorf("Usage: %s\n", job.Name)
+	}
+	var (
+		remoteURL      = job.Getenv("remote")
+		repoName       = job.Getenv("t")
+		suppressOutput = job.GetenvBool("q")
+		noCache        = job.GetenvBool("nocache")
+		rm             = job.GetenvBool("rm")
+		forceRm        = job.GetenvBool("forcerm")
+		authConfig     = &registry.AuthConfig{}
+		configFile     = &registry.ConfigFile{}
+		tag            string
+		context        io.ReadCloser
+	)
+	job.GetenvJson("authConfig", authConfig)
+	job.GetenvJson("configFile", configFile)
+	repoName, tag = parsers.ParseRepositoryTag(repoName)
+
+	if remoteURL == "" {
+		context = ioutil.NopCloser(job.Stdin)
+	} else if utils.IsGIT(remoteURL) {
+		if !strings.HasPrefix(remoteURL, "git://") {
+			remoteURL = "https://" + remoteURL
+		}
+		root, err := ioutil.TempDir("", "docker-build-git")
+		if err != nil {
+			return job.Error(err)
+		}
+		defer os.RemoveAll(root)
+
+		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
+			return job.Errorf("Error trying to use git: %s (%s)", err, output)
+		}
+
+		c, err := archive.Tar(root, archive.Uncompressed)
+		if err != nil {
+			return job.Error(err)
+		}
+		context = c
+	} else if utils.IsURL(remoteURL) {
+		f, err := utils.Download(remoteURL)
+		if err != nil {
+			return job.Error(err)
+		}
+		defer f.Body.Close()
+		dockerFile, err := ioutil.ReadAll(f.Body)
+		if err != nil {
+			return job.Error(err)
+		}
+		c, err := archive.Generate("Dockerfile", string(dockerFile))
+		if err != nil {
+			return job.Error(err)
+		}
+		context = c
+	}
+	defer context.Close()
+
+	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
+
+	opts := &evaluator.BuildOpts{
+		Daemon: b.Daemon,
+		Engine: b.Engine,
+		OutStream: &utils.StdoutFormater{
+			Writer:          job.Stdout,
+			StreamFormatter: sf,
+		},
+		ErrStream: &utils.StderrFormater{
+			Writer:          job.Stdout,
+			StreamFormatter: sf,
+		},
+		Verbose:         !suppressOutput,
+		UtilizeCache:    !noCache,
+		Remove:          rm,
+		ForceRemove:     forceRm,
+		OutOld:          job.Stdout,
+		StreamFormatter: sf,
+		AuthConfig:      authConfig,
+		AuthConfigFile:  configFile,
+	}
+
+	id, err := NewBuilder(opts).Run(context)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	if repoName != "" {
+		b.Daemon.Repositories().Set(repoName, tag, id, false)
+	}
+	return engine.StatusOK
+}
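
Install() registers the "build" job on the engine, replacing the registration the daemon package used to do itself (daemon/daemon.go loses one line and docker/daemon.go gains five in hunks not shown in this view). Presumably the docker binary now wires it up roughly along these lines (a sketch, not the actual +5 hunk):

    package example // illustration only, not part of this commit

    import (
    	"github.com/docker/docker/builder"
    	"github.com/docker/docker/daemon"
    	"github.com/docker/docker/engine"
    )

    // installBuilder hooks the builder package into a freshly created engine and
    // daemon, so `docker build` is served by BuilderJob.CmdBuild above.
    func installBuilder(eng *engine.Engine, d *daemon.Daemon) {
    	b := &builder.BuilderJob{Engine: eng, Daemon: d}
    	b.Install()
    }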

+ 0 - 1006
daemon/build.go

@@ -1,1006 +0,0 @@
-package daemon
-
-import (
-	"crypto/sha256"
-	"encoding/hex"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/url"
-	"os"
-	"os/exec"
-	"path"
-	"path/filepath"
-	"reflect"
-	"regexp"
-	"sort"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/docker/docker/archive"
-	"github.com/docker/docker/engine"
-	"github.com/docker/docker/nat"
-	"github.com/docker/docker/pkg/log"
-	"github.com/docker/docker/pkg/parsers"
-	"github.com/docker/docker/pkg/symlink"
-	"github.com/docker/docker/pkg/system"
-	"github.com/docker/docker/pkg/tarsum"
-	"github.com/docker/docker/registry"
-	"github.com/docker/docker/runconfig"
-	"github.com/docker/docker/utils"
-)
-
-func (daemon *Daemon) CmdBuild(job *engine.Job) engine.Status {
-	if len(job.Args) != 0 {
-		return job.Errorf("Usage: %s\n", job.Name)
-	}
-	var (
-		remoteURL      = job.Getenv("remote")
-		repoName       = job.Getenv("t")
-		suppressOutput = job.GetenvBool("q")
-		noCache        = job.GetenvBool("nocache")
-		rm             = job.GetenvBool("rm")
-		forceRm        = job.GetenvBool("forcerm")
-		authConfig     = &registry.AuthConfig{}
-		configFile     = &registry.ConfigFile{}
-		tag            string
-		context        io.ReadCloser
-	)
-	job.GetenvJson("authConfig", authConfig)
-	job.GetenvJson("configFile", configFile)
-	repoName, tag = parsers.ParseRepositoryTag(repoName)
-
-	if remoteURL == "" {
-		context = ioutil.NopCloser(job.Stdin)
-	} else if utils.IsGIT(remoteURL) {
-		if !strings.HasPrefix(remoteURL, "git://") {
-			remoteURL = "https://" + remoteURL
-		}
-		root, err := ioutil.TempDir("", "docker-build-git")
-		if err != nil {
-			return job.Error(err)
-		}
-		defer os.RemoveAll(root)
-
-		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
-			return job.Errorf("Error trying to use git: %s (%s)", err, output)
-		}
-
-		c, err := archive.Tar(root, archive.Uncompressed)
-		if err != nil {
-			return job.Error(err)
-		}
-		context = c
-	} else if utils.IsURL(remoteURL) {
-		f, err := utils.Download(remoteURL)
-		if err != nil {
-			return job.Error(err)
-		}
-		defer f.Body.Close()
-		dockerFile, err := ioutil.ReadAll(f.Body)
-		if err != nil {
-			return job.Error(err)
-		}
-		c, err := archive.Generate("Dockerfile", string(dockerFile))
-		if err != nil {
-			return job.Error(err)
-		}
-		context = c
-	}
-	defer context.Close()
-
-	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
-	b := NewBuildFile(daemon, daemon.eng,
-		&utils.StdoutFormater{
-			Writer:          job.Stdout,
-			StreamFormatter: sf,
-		},
-		&utils.StderrFormater{
-			Writer:          job.Stdout,
-			StreamFormatter: sf,
-		},
-		!suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile)
-	id, err := b.Build(context)
-	if err != nil {
-		return job.Error(err)
-	}
-	if repoName != "" {
-		daemon.Repositories().Set(repoName, tag, id, false)
-	}
-	return engine.StatusOK
-}
-
-var (
-	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
-)
-
-type BuildFile interface {
-	Build(io.Reader) (string, error)
-	CmdFrom(string) error
-	CmdRun(string) error
-}
-
-type buildFile struct {
-	daemon *Daemon
-	eng    *engine.Engine
-
-	image      string
-	maintainer string
-	config     *runconfig.Config
-
-	contextPath string
-	context     *tarsum.TarSum
-
-	verbose      bool
-	utilizeCache bool
-	rm           bool
-	forceRm      bool
-
-	authConfig *registry.AuthConfig
-	configFile *registry.ConfigFile
-
-	tmpContainers map[string]struct{}
-	tmpImages     map[string]struct{}
-
-	outStream io.Writer
-	errStream io.Writer
-
-	// Deprecated, original writer used for ImagePull. To be removed.
-	outOld io.Writer
-	sf     *utils.StreamFormatter
-
-	// cmdSet indicates is CMD was set in current Dockerfile
-	cmdSet bool
-}
-
-func (b *buildFile) clearTmp(containers map[string]struct{}) {
-	for c := range containers {
-		tmp := b.daemon.Get(c)
-		if err := b.daemon.Destroy(tmp); err != nil {
-			fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
-		} else {
-			delete(containers, c)
-			fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c))
-		}
-	}
-}
-
-func (b *buildFile) CmdFrom(name string) error {
-	image, err := b.daemon.Repositories().LookupImage(name)
-	if err != nil {
-		if b.daemon.Graph().IsNotExist(err) {
-			remote, tag := parsers.ParseRepositoryTag(name)
-			pullRegistryAuth := b.authConfig
-			if len(b.configFile.Configs) > 0 {
-				// The request came with a full auth config file, we prefer to use that
-				endpoint, _, err := registry.ResolveRepositoryName(remote)
-				if err != nil {
-					return err
-				}
-				resolvedAuth := b.configFile.ResolveAuthConfig(endpoint)
-				pullRegistryAuth = &resolvedAuth
-			}
-			job := b.eng.Job("pull", remote, tag)
-			job.SetenvBool("json", b.sf.Json())
-			job.SetenvBool("parallel", true)
-			job.SetenvJson("authConfig", pullRegistryAuth)
-			job.Stdout.Add(b.outOld)
-			if err := job.Run(); err != nil {
-				return err
-			}
-			image, err = b.daemon.Repositories().LookupImage(name)
-			if err != nil {
-				return err
-			}
-		} else {
-			return err
-		}
-	}
-	b.image = image.ID
-	b.config = &runconfig.Config{}
-	if image.Config != nil {
-		b.config = image.Config
-	}
-	if b.config.Env == nil || len(b.config.Env) == 0 {
-		b.config.Env = append(b.config.Env, "PATH="+DefaultPathEnv)
-	}
-	// Process ONBUILD triggers if they exist
-	if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
-		fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers)
-	}
-
-	// Copy the ONBUILD triggers, and remove them from the config, since the config will be commited.
-	onBuildTriggers := b.config.OnBuild
-	b.config.OnBuild = []string{}
-
-	for n, step := range onBuildTriggers {
-		splitStep := strings.Split(step, " ")
-		stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " "))
-		switch stepInstruction {
-		case "ONBUILD":
-			return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step)
-		case "MAINTAINER", "FROM":
-			return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step)
-		}
-		if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// The ONBUILD command declares a build instruction to be executed in any future build
-// using the current image as a base.
-func (b *buildFile) CmdOnbuild(trigger string) error {
-	splitTrigger := strings.Split(trigger, " ")
-	triggerInstruction := strings.ToUpper(strings.Trim(splitTrigger[0], " "))
-	switch triggerInstruction {
-	case "ONBUILD":
-		return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
-	case "MAINTAINER", "FROM":
-		return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
-	}
-	b.config.OnBuild = append(b.config.OnBuild, trigger)
-	return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
-}
-
-func (b *buildFile) CmdMaintainer(name string) error {
-	b.maintainer = name
-	return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name))
-}
-
-// probeCache checks to see if image-caching is enabled (`b.utilizeCache`)
-// and if so attempts to look up the current `b.image` and `b.config` pair
-// in the current server `b.daemon`. If an image is found, probeCache returns
-// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
-// is any error, it returns `(false, err)`.
-func (b *buildFile) probeCache() (bool, error) {
-	if b.utilizeCache {
-		if cache, err := b.daemon.ImageGetCached(b.image, b.config); err != nil {
-			return false, err
-		} else if cache != nil {
-			fmt.Fprintf(b.outStream, " ---> Using cache\n")
-			log.Debugf("[BUILDER] Use cached version")
-			b.image = cache.ID
-			return true, nil
-		} else {
-			log.Debugf("[BUILDER] Cache miss")
-		}
-	}
-	return false, nil
-}
-
-func (b *buildFile) CmdRun(args string) error {
-	if b.image == "" {
-		return fmt.Errorf("Please provide a source image with `from` prior to run")
-	}
-	config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil)
-	if err != nil {
-		return err
-	}
-
-	cmd := b.config.Cmd
-	// set Cmd manually, this is special case only for Dockerfiles
-	b.config.Cmd = config.Cmd
-	runconfig.Merge(b.config, config)
-
-	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
-
-	log.Debugf("Command to be executed: %v", b.config.Cmd)
-
-	hit, err := b.probeCache()
-	if err != nil {
-		return err
-	}
-	if hit {
-		return nil
-	}
-
-	c, err := b.create()
-	if err != nil {
-		return err
-	}
-	// Ensure that we keep the container mounted until the commit
-	// to avoid unmounting and then mounting directly again
-	c.Mount()
-	defer c.Unmount()
-
-	err = b.run(c)
-	if err != nil {
-		return err
-	}
-	if err := b.commit(c.ID, cmd, "run"); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (b *buildFile) FindEnvKey(key string) int {
-	for k, envVar := range b.config.Env {
-		envParts := strings.SplitN(envVar, "=", 2)
-		if key == envParts[0] {
-			return k
-		}
-	}
-	return -1
-}
-
-func (b *buildFile) ReplaceEnvMatches(value string) (string, error) {
-	exp, err := regexp.Compile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)")
-	if err != nil {
-		return value, err
-	}
-	matches := exp.FindAllString(value, -1)
-	for _, match := range matches {
-		match = match[strings.Index(match, "$"):]
-		matchKey := strings.Trim(match, "${}")
-
-		for _, envVar := range b.config.Env {
-			envParts := strings.SplitN(envVar, "=", 2)
-			envKey := envParts[0]
-			envValue := envParts[1]
-
-			if envKey == matchKey {
-				value = strings.Replace(value, match, envValue, -1)
-				break
-			}
-		}
-	}
-	return value, nil
-}
-
-func (b *buildFile) CmdEnv(args string) error {
-	tmp := strings.SplitN(args, " ", 2)
-	if len(tmp) != 2 {
-		return fmt.Errorf("Invalid ENV format")
-	}
-	key := strings.Trim(tmp[0], " \t")
-	value := strings.Trim(tmp[1], " \t")
-
-	envKey := b.FindEnvKey(key)
-	replacedValue, err := b.ReplaceEnvMatches(value)
-	if err != nil {
-		return err
-	}
-	replacedVar := fmt.Sprintf("%s=%s", key, replacedValue)
-
-	if envKey >= 0 {
-		b.config.Env[envKey] = replacedVar
-	} else {
-		b.config.Env = append(b.config.Env, replacedVar)
-	}
-	return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar))
-}
-
-func (b *buildFile) buildCmdFromJson(args string) []string {
-	var cmd []string
-	if err := json.Unmarshal([]byte(args), &cmd); err != nil {
-		log.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
-		cmd = []string{"/bin/sh", "-c", args}
-	}
-	return cmd
-}
-
-func (b *buildFile) CmdCmd(args string) error {
-	cmd := b.buildCmdFromJson(args)
-	b.config.Cmd = cmd
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
-		return err
-	}
-	b.cmdSet = true
-	return nil
-}
-
-func (b *buildFile) CmdEntrypoint(args string) error {
-	entrypoint := b.buildCmdFromJson(args)
-	b.config.Entrypoint = entrypoint
-	// if there is no cmd in current Dockerfile - cleanup cmd
-	if !b.cmdSet {
-		b.config.Cmd = nil
-	}
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (b *buildFile) CmdExpose(args string) error {
-	portsTab := strings.Split(args, " ")
-
-	if b.config.ExposedPorts == nil {
-		b.config.ExposedPorts = make(nat.PortSet)
-	}
-	ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...))
-	if err != nil {
-		return err
-	}
-	for port := range ports {
-		if _, exists := b.config.ExposedPorts[port]; !exists {
-			b.config.ExposedPorts[port] = struct{}{}
-		}
-	}
-	b.config.PortSpecs = nil
-
-	return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
-}
-
-func (b *buildFile) CmdUser(args string) error {
-	b.config.User = args
-	return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args))
-}
-
-func (b *buildFile) CmdInsert(args string) error {
-	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
-}
-
-func (b *buildFile) CmdCopy(args string) error {
-	return b.runContextCommand(args, false, false, "COPY")
-}
-
-func (b *buildFile) CmdWorkdir(workdir string) error {
-	if workdir[0] == '/' {
-		b.config.WorkingDir = workdir
-	} else {
-		if b.config.WorkingDir == "" {
-			b.config.WorkingDir = "/"
-		}
-		b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir)
-	}
-	return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
-}
-
-func (b *buildFile) CmdVolume(args string) error {
-	if args == "" {
-		return fmt.Errorf("Volume cannot be empty")
-	}
-
-	var volume []string
-	if err := json.Unmarshal([]byte(args), &volume); err != nil {
-		volume = []string{args}
-	}
-	if b.config.Volumes == nil {
-		b.config.Volumes = map[string]struct{}{}
-	}
-	for _, v := range volume {
-		b.config.Volumes[v] = struct{}{}
-	}
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (b *buildFile) checkPathForAddition(orig string) error {
-	origPath := path.Join(b.contextPath, orig)
-	origPath, err := filepath.EvalSymlinks(origPath)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return fmt.Errorf("%s: no such file or directory", orig)
-		}
-		return err
-	}
-	if !strings.HasPrefix(origPath, b.contextPath) {
-		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
-	}
-	if _, err := os.Stat(origPath); err != nil {
-		if os.IsNotExist(err) {
-			return fmt.Errorf("%s: no such file or directory", orig)
-		}
-		return err
-	}
-	return nil
-}
-
-func (b *buildFile) addContext(container *Container, orig, dest string, decompress bool) error {
-	var (
-		err        error
-		destExists = true
-		origPath   = path.Join(b.contextPath, orig)
-		destPath   = path.Join(container.RootfsPath(), dest)
-	)
-
-	if destPath != container.RootfsPath() {
-		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
-		if err != nil {
-			return err
-		}
-	}
-
-	// Preserve the trailing '/'
-	if strings.HasSuffix(dest, "/") || dest == "." {
-		destPath = destPath + "/"
-	}
-
-	destStat, err := os.Stat(destPath)
-	if err != nil {
-		if !os.IsNotExist(err) {
-			return err
-		}
-		destExists = false
-	}
-
-	fi, err := os.Stat(origPath)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return fmt.Errorf("%s: no such file or directory", orig)
-		}
-		return err
-	}
-
-	if fi.IsDir() {
-		return copyAsDirectory(origPath, destPath, destExists)
-	}
-
-	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
-	if decompress {
-		// First try to unpack the source as an archive
-		// to support the untar feature we need to clean up the path a little bit
-		// because tar is very forgiving.  First we need to strip off the archive's
-		// filename from the path but this is only added if it does not end in / .
-		tarDest := destPath
-		if strings.HasSuffix(tarDest, "/") {
-			tarDest = filepath.Dir(destPath)
-		}
-
-		// try to successfully untar the orig
-		if err := archive.UntarPath(origPath, tarDest); err == nil {
-			return nil
-		} else if err != io.EOF {
-			log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
-		}
-	}
-
-	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
-		return err
-	}
-	if err := archive.CopyWithTar(origPath, destPath); err != nil {
-		return err
-	}
-
-	resPath := destPath
-	if destExists && destStat.IsDir() {
-		resPath = path.Join(destPath, path.Base(origPath))
-	}
-
-	return fixPermissions(resPath, 0, 0)
-}
-
-func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error {
-	if b.context == nil {
-		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
-	}
-	tmp := strings.SplitN(args, " ", 2)
-	if len(tmp) != 2 {
-		return fmt.Errorf("Invalid %s format", cmdName)
-	}
-
-	orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t"))
-	if err != nil {
-		return err
-	}
-
-	dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t"))
-	if err != nil {
-		return err
-	}
-
-	cmd := b.config.Cmd
-	b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)}
-	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
-	b.config.Image = b.image
-
-	var (
-		origPath   = orig
-		destPath   = dest
-		remoteHash string
-		isRemote   bool
-		decompress = true
-	)
-
-	isRemote = utils.IsURL(orig)
-	if isRemote && !allowRemote {
-		return fmt.Errorf("Source can't be a URL for %s", cmdName)
-	} else if utils.IsURL(orig) {
-		// Initiate the download
-		resp, err := utils.Download(orig)
-		if err != nil {
-			return err
-		}
-
-		// Create a tmp dir
-		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
-		if err != nil {
-			return err
-		}
-
-		// Create a tmp file within our tmp dir
-		tmpFileName := path.Join(tmpDirName, "tmp")
-		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
-		if err != nil {
-			return err
-		}
-		defer os.RemoveAll(tmpDirName)
-
-		// Download and dump result to tmp file
-		if _, err := io.Copy(tmpFile, resp.Body); err != nil {
-			tmpFile.Close()
-			return err
-		}
-		tmpFile.Close()
-
-		// Remove the mtime of the newly created tmp file
-		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
-			return err
-		}
-
-		origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
-
-		// Process the checksum
-		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
-		if err != nil {
-			return err
-		}
-		tarSum := &tarsum.TarSum{Reader: r, DisableCompression: true}
-		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
-			return err
-		}
-		remoteHash = tarSum.Sum(nil)
-		r.Close()
-
-		// If the destination is a directory, figure out the filename.
-		if strings.HasSuffix(dest, "/") {
-			u, err := url.Parse(orig)
-			if err != nil {
-				return err
-			}
-			path := u.Path
-			if strings.HasSuffix(path, "/") {
-				path = path[:len(path)-1]
-			}
-			parts := strings.Split(path, "/")
-			filename := parts[len(parts)-1]
-			if filename == "" {
-				return fmt.Errorf("cannot determine filename from url: %s", u)
-			}
-			destPath = dest + filename
-		}
-	}
-
-	if err := b.checkPathForAddition(origPath); err != nil {
-		return err
-	}
-
-	// Hash path and check the cache
-	if b.utilizeCache {
-		var (
-			hash string
-			sums = b.context.GetSums()
-		)
-
-		if remoteHash != "" {
-			hash = remoteHash
-		} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
-			return err
-		} else if fi.IsDir() {
-			var subfiles []string
-			for file, sum := range sums {
-				absFile := path.Join(b.contextPath, file)
-				absOrigPath := path.Join(b.contextPath, origPath)
-				if strings.HasPrefix(absFile, absOrigPath) {
-					subfiles = append(subfiles, sum)
-				}
-			}
-			sort.Strings(subfiles)
-			hasher := sha256.New()
-			hasher.Write([]byte(strings.Join(subfiles, ",")))
-			hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
-		} else {
-			if origPath[0] == '/' && len(origPath) > 1 {
-				origPath = origPath[1:]
-			}
-			origPath = strings.TrimPrefix(origPath, "./")
-			if h, ok := sums[origPath]; ok {
-				hash = "file:" + h
-			}
-		}
-		b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)}
-		hit, err := b.probeCache()
-		if err != nil {
-			return err
-		}
-		// If we do not have a hash, never use the cache
-		if hit && hash != "" {
-			return nil
-		}
-	}
-
-	// Create the container
-	container, _, err := b.daemon.Create(b.config, "")
-	if err != nil {
-		return err
-	}
-	b.tmpContainers[container.ID] = struct{}{}
-
-	if err := container.Mount(); err != nil {
-		return err
-	}
-	defer container.Unmount()
-
-	if !allowDecompression || isRemote {
-		decompress = false
-	}
-	if err := b.addContext(container, origPath, destPath, decompress); err != nil {
-		return err
-	}
-
-	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, orig, dest)); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (b *buildFile) CmdAdd(args string) error {
-	return b.runContextCommand(args, true, true, "ADD")
-}
-
-func (b *buildFile) create() (*Container, error) {
-	if b.image == "" {
-		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
-	}
-	b.config.Image = b.image
-
-	// Create the container
-	c, _, err := b.daemon.Create(b.config, "")
-	if err != nil {
-		return nil, err
-	}
-	b.tmpContainers[c.ID] = struct{}{}
-	fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
-
-	// override the entry point that may have been picked up from the base image
-	c.Path = b.config.Cmd[0]
-	c.Args = b.config.Cmd[1:]
-
-	return c, nil
-}
-
-func (b *buildFile) run(c *Container) error {
-	var errCh chan error
-	if b.verbose {
-		errCh = utils.Go(func() error {
-			// FIXME: call the 'attach' job so that daemon.Attach can be made private
-			//
-			// FIXME (LK4D4): Also, it may make sense to call the "logs" job; it is like attach
-			// but without hijacking for stdin. With attach there can also be a race
-			// condition, because some output may already have been printed before it.
-			return <-b.daemon.Attach(c, nil, nil, b.outStream, b.errStream)
-		})
-	}
-
-	//start the container
-	if err := c.Start(); err != nil {
-		return err
-	}
-
-	if errCh != nil {
-		if err := <-errCh; err != nil {
-			return err
-		}
-	}
-
-	// Wait for it to finish
-	if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 {
-		err := &utils.JSONError{
-			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
-			Code:    ret,
-		}
-		return err
-	}
-
-	return nil
-}
-
-// Commit the container <id> with the autorun command <autoCmd>
-func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
-	if b.image == "" {
-		return fmt.Errorf("Please provide a source image with `from` prior to commit")
-	}
-	b.config.Image = b.image
-	if id == "" {
-		cmd := b.config.Cmd
-		b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
-		defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
-
-		hit, err := b.probeCache()
-		if err != nil {
-			return err
-		}
-		if hit {
-			return nil
-		}
-
-		container, warnings, err := b.daemon.Create(b.config, "")
-		if err != nil {
-			return err
-		}
-		for _, warning := range warnings {
-			fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning)
-		}
-		b.tmpContainers[container.ID] = struct{}{}
-		fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
-		id = container.ID
-
-		if err := container.Mount(); err != nil {
-			return err
-		}
-		defer container.Unmount()
-	}
-	container := b.daemon.Get(id)
-	if container == nil {
-		return fmt.Errorf("An error occurred while creating the container")
-	}
-
-	// Note: Actually copy the struct
-	autoConfig := *b.config
-	autoConfig.Cmd = autoCmd
-	// Commit the container
-	image, err := b.daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
-	if err != nil {
-		return err
-	}
-	b.tmpImages[image.ID] = struct{}{}
-	b.image = image.ID
-	return nil
-}
-
-// Long lines can be split with a backslash
-var lineContinuation = regexp.MustCompile(`\\\s*\n`)
-
-func (b *buildFile) Build(context io.Reader) (string, error) {
-	tmpdirPath, err := ioutil.TempDir("", "docker-build")
-	if err != nil {
-		return "", err
-	}
-
-	decompressedStream, err := archive.DecompressStream(context)
-	if err != nil {
-		return "", err
-	}
-
-	b.context = &tarsum.TarSum{Reader: decompressedStream, DisableCompression: true}
-	if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
-		return "", err
-	}
-	defer os.RemoveAll(tmpdirPath)
-
-	b.contextPath = tmpdirPath
-	filename := path.Join(tmpdirPath, "Dockerfile")
-	if _, err := os.Stat(filename); os.IsNotExist(err) {
-		return "", fmt.Errorf("Can't build a directory with no Dockerfile")
-	}
-	fileBytes, err := ioutil.ReadFile(filename)
-	if err != nil {
-		return "", err
-	}
-	if len(fileBytes) == 0 {
-		return "", ErrDockerfileEmpty
-	}
-	var (
-		dockerfile = lineContinuation.ReplaceAllString(stripComments(fileBytes), "")
-		stepN      = 0
-	)
-	for _, line := range strings.Split(dockerfile, "\n") {
-		line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n")
-		if len(line) == 0 {
-			continue
-		}
-		if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil {
-			if b.forceRm {
-				b.clearTmp(b.tmpContainers)
-			}
-			return "", err
-		} else if b.rm {
-			b.clearTmp(b.tmpContainers)
-		}
-		stepN++
-	}
-	if b.image != "" {
-		fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image))
-		return b.image, nil
-	}
-	return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n")
-}
-
-// BuildStep parses a single build step from `instruction` and executes it in the current context.
-func (b *buildFile) BuildStep(name, expression string) error {
-	fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression)
-	tmp := strings.SplitN(expression, " ", 2)
-	if len(tmp) != 2 {
-		return fmt.Errorf("Invalid Dockerfile format")
-	}
-	instruction := strings.ToLower(strings.Trim(tmp[0], " "))
-	arguments := strings.Trim(tmp[1], " ")
-
-	method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
-	if !exists {
-		fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
-		return nil
-	}
-
-	ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
-	if ret != nil {
-		return ret.(error)
-	}
-
-	fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image))
-	return nil
-}
-
-func stripComments(raw []byte) string {
-	var (
-		out   []string
-		lines = strings.Split(string(raw), "\n")
-	)
-	for _, l := range lines {
-		if len(l) == 0 || l[0] == '#' {
-			continue
-		}
-		out = append(out, l)
-	}
-	return strings.Join(out, "\n")
-}
-
-func copyAsDirectory(source, destination string, destinationExists bool) error {
-	if err := archive.CopyWithTar(source, destination); err != nil {
-		return err
-	}
-
-	if destinationExists {
-		files, err := ioutil.ReadDir(source)
-		if err != nil {
-			return err
-		}
-
-		for _, file := range files {
-			if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil {
-				return err
-			}
-		}
-		return nil
-	}
-
-	return fixPermissions(destination, 0, 0)
-}
-
-func fixPermissions(destination string, uid, gid int) error {
-	return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error {
-		if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) {
-			return err
-		}
-		return nil
-	})
-}
-
-func NewBuildFile(d *Daemon, eng *engine.Engine, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile {
-	return &buildFile{
-		daemon:        d,
-		eng:           eng,
-		config:        &runconfig.Config{},
-		outStream:     outStream,
-		errStream:     errStream,
-		tmpContainers: make(map[string]struct{}),
-		tmpImages:     make(map[string]struct{}),
-		verbose:       verbose,
-		utilizeCache:  utilizeCache,
-		rm:            rm,
-		forceRm:       forceRm,
-		sf:            sf,
-		authConfig:    auth,
-		configFile:    authConfigFile,
-		outOld:        outOld,
-	}
-}
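
The BuildStep method removed above resolves each instruction by reflecting on method names ("Cmd" plus the capitalized instruction). The new builder/evaluator package replaces this with explicit dispatch; its actual tables are not shown in this excerpt, so the following is only a minimal sketch of the table-driven approach, with hypothetical type and handler names:

package main

import (
	"fmt"
	"strings"
)

// buildState is a hypothetical stand-in for the evaluator's builder state.
type buildState struct {
	user    string
	workdir string
}

// One handler per instruction, all sharing a single signature, so an unknown
// instruction is a plain map miss rather than a failed reflection lookup.
var handlers = map[string]func(b *buildState, args []string) error{
	"user": func(b *buildState, args []string) error {
		if len(args) != 1 {
			return fmt.Errorf("USER takes exactly one argument")
		}
		b.user = args[0]
		return nil
	},
	"workdir": func(b *buildState, args []string) error {
		if len(args) != 1 {
			return fmt.Errorf("WORKDIR takes exactly one argument")
		}
		b.workdir = args[0]
		return nil
	},
}

// dispatch looks the instruction up in the table; this illustrates the shape
// of the change, not the evaluator's real code.
func dispatch(b *buildState, instruction string, args []string) error {
	h, ok := handlers[strings.ToLower(instruction)]
	if !ok {
		return fmt.Errorf("unknown instruction: %s", instruction)
	}
	return h(b, args)
}

func main() {
	b := &buildState{}
	if err := dispatch(b, "WORKDIR", []string{"/app"}); err != nil {
		fmt.Println(err)
	}
	fmt.Println(b.workdir) // prints /app
}
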

+ 0 - 1
daemon/daemon.go

@@ -101,7 +101,6 @@ func (daemon *Daemon) Install(eng *engine.Engine) error {
 	// FIXME: remove ImageDelete's dependency on Daemon, then move to graph/
 	for name, method := range map[string]engine.Handler{
 		"attach":            daemon.ContainerAttach,
-		"build":             daemon.CmdBuild,
 		"commit":            daemon.ContainerCommit,
 		"container_changes": daemon.ContainerChanges,
 		"container_copy":    daemon.ContainerCopy,

+ 5 - 0
docker/daemon.go

@@ -5,6 +5,7 @@ package main
 import (
 	"log"
 
+	"github.com/docker/docker/builder"
 	"github.com/docker/docker/builtins"
 	"github.com/docker/docker/daemon"
 	_ "github.com/docker/docker/daemon/execdriver/lxc"
@@ -48,6 +49,10 @@ func mainDaemon() {
 		if err := d.Install(eng); err != nil {
 			log.Fatal(err)
 		}
+
+		b := &builder.BuilderJob{eng, d}
+		b.Install()
+
 		// after the daemon is done setting up we can tell the api to start
 		// accepting connections
 		if err := eng.Job("acceptconnections").Run(); err != nil {
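
With the "build" handler dropped from daemon.Install, the job is now wired up through the new builder package at daemon start. builder/job.go itself is not part of this excerpt; the sketch below only illustrates the assumed shape, a BuilderJob holding the engine and daemon (field order taken from the &builder.BuilderJob{eng, d} literal above) whose Install registers a "build" handler with the engine. The handler body is elided and assumed to construct a builder and run it through the evaluator.

package builder

import (
	"github.com/docker/docker/daemon"
	"github.com/docker/docker/engine"
)

// BuilderJob ties the engine and the daemon together so the build job can
// live outside the daemon package. Field order matches the positional
// composite literal used in docker/daemon.go.
type BuilderJob struct {
	Engine *engine.Engine
	Daemon *daemon.Daemon
}

// Install registers the "build" handler that daemon.Install no longer provides.
func (b *BuilderJob) Install() {
	b.Engine.Register("build", b.CmdBuild)
}

// CmdBuild is assumed to parse the job's arguments and environment, run the
// build, and stream progress back on job.Stdout.
func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
	// ... elided: argument parsing, context handling, running the build ...
	return engine.StatusOK
}
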