
builder: add command handling to evaluator.

Docker-DCO-1.1-Signed-off-by: Erik Hollensbe <github@hollensbe.org> (github: erikh)
Erik Hollensbe, 11 years ago
parent
commit
d6c0bbc3cb

+ 0 - 2
builder/evaluator/builder/.gitignore

@@ -1,2 +0,0 @@
-builder
-Dockerfile

+ 0 - 31
builder/evaluator/builder/main.go

@@ -1,31 +0,0 @@
-package main
-
-import (
-	"os"
-
-	"github.com/erikh/buildfile/evaluator"
-)
-
-func main() {
-	if len(os.Args) < 2 {
-		os.Stderr.WriteString("Please supply filename(s) to evaluate")
-		os.Exit(1)
-	}
-
-	for _, fn := range os.Args[1:] {
-		f, err := os.Open(fn)
-		if err != nil {
-			panic(err)
-		}
-
-		opts := &evaluator.BuildOpts{}
-
-		bf, err := opts.NewBuildFile(f)
-		if err != nil {
-			panic(err)
-		}
-		if err := bf.Run(); err != nil {
-			panic(err)
-		}
-	}
-}

+ 205 - 6
builder/evaluator/dispatchers.go

@@ -2,10 +2,20 @@ package evaluator
 
 import (
 	"fmt"
+	"path/filepath"
 	"strings"
+
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
 )
 
-func env(b *buildFile, args ...string) error {
+// dispatch with no layer / parsing.
+func nullDispatch(b *buildFile, args []string) error {
+	return nil
+}
+
+func env(b *buildFile, args []string) error {
 	if len(args) != 2 {
 		return fmt.Errorf("ENV accepts two arguments")
 	}
@@ -14,12 +24,12 @@ func env(b *buildFile, args ...string) error {
 	// handling. This routine gets much shorter with the denormalization here.
 	key := args[0]
 	b.env[key] = args[1]
-	b.config.Env = append(b.config.Env, strings.Join("=", key, b.env[key]))
+	b.config.Env = append(b.config.Env, strings.Join([]string{key, b.env[key]}, "="))
 
-	return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", value))
+	return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s=%s", key, b.env[key]))
 }
 
-func maintainer(b *buildFile, args ...string) error {
+func maintainer(b *buildFile, args []string) error {
 	if len(args) != 1 {
 		return fmt.Errorf("MAINTAINER requires only one argument")
 	}
@@ -28,7 +38,7 @@ func maintainer(b *buildFile, args ...string) error {
 	return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
 }
 
-func add(b *buildFile, args ...string) error {
+func add(b *buildFile, args []string) error {
 	if len(args) != 2 {
 		return fmt.Errorf("ADD requires two arguments")
 	}
@@ -36,10 +46,199 @@ func add(b *buildFile, args ...string) error {
 	return b.runContextCommand(args, true, true, "ADD")
 }
 
-func dispatchCopy(b *buildFile, args ...string) error {
+func dispatchCopy(b *buildFile, args []string) error {
 	if len(args) != 2 {
 		return fmt.Errorf("COPY requires two arguments")
 	}
 
 	return b.runContextCommand(args, false, false, "COPY")
 }
+
+func from(b *buildFile, args []string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("FROM requires one argument")
+	}
+
+	name := args[0]
+
+	image, err := b.options.Daemon.Repositories().LookupImage(name)
+	if err != nil {
+		if b.options.Daemon.Graph().IsNotExist(err) {
+			image, err = b.pullImage(name)
+		}
+
+		// note that the top level err will still be !nil here if IsNotExist is
+	// not the error. This approach just simplifies the logic a bit.
+		if err != nil {
+			return err
+		}
+	}
+
+	return b.processImageFrom(image)
+}
+
+func onbuild(b *buildFile, args []string) error {
+	triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
+	switch triggerInstruction {
+	case "ONBUILD":
+		return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
+	case "MAINTAINER", "FROM":
+		return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
+	}
+
+	trigger := strings.Join(args, " ")
+
+	b.config.OnBuild = append(b.config.OnBuild, trigger)
+	return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
+}
+
+func workdir(b *buildFile, args []string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("WORKDIR requires exactly one argument")
+	}
+
+	workdir := args[0]
+
+	if workdir[0] == '/' {
+		b.config.WorkingDir = workdir
+	} else {
+		if b.config.WorkingDir == "" {
+			b.config.WorkingDir = "/"
+		}
+		b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir)
+	}
+
+	return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
+}
+
+func run(b *buildFile, args []string) error {
+	if len(args) == 1 { // literal string command, not an exec array
+		args = append([]string{"/bin/sh", "-c"}, args[0])
+	}
+
+	if b.image == "" {
+		return fmt.Errorf("Please provide a source image with `from` prior to run")
+	}
+
+	config, _, _, err := runconfig.Parse(append([]string{b.image}, args...), nil)
+	if err != nil {
+		return err
+	}
+
+	cmd := b.config.Cmd
+	// set Cmd manually, this is special case only for Dockerfiles
+	b.config.Cmd = config.Cmd
+	runconfig.Merge(b.config, config)
+
+	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
+
+	utils.Debugf("Command to be executed: %v", b.config.Cmd)
+
+	hit, err := b.probeCache()
+	if err != nil {
+		return err
+	}
+	if hit {
+		return nil
+	}
+
+	c, err := b.create()
+	if err != nil {
+		return err
+	}
+	// Ensure that we keep the container mounted until the commit
+	// to avoid unmounting and then mounting directly again
+	c.Mount()
+	defer c.Unmount()
+
+	err = b.run(c)
+	if err != nil {
+		return err
+	}
+	if err := b.commit(c.ID, cmd, "run"); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func cmd(b *buildFile, args []string) error {
+	if len(args) < 2 {
+		args = append([]string{"/bin/sh", "-c"}, args...)
+	}
+
+	b.config.Cmd = args
+	if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
+		return err
+	}
+
+	b.cmdSet = true
+	return nil
+}
+
+func entrypoint(b *buildFile, args []string) error {
+	b.config.Entrypoint = args
+
+	// if there is no cmd in current Dockerfile - cleanup cmd
+	if !b.cmdSet {
+		b.config.Cmd = nil
+	}
+	if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func expose(b *buildFile, args []string) error {
+	portsTab := args
+
+	if b.config.ExposedPorts == nil {
+		b.config.ExposedPorts = make(nat.PortSet)
+	}
+
+	ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...))
+	if err != nil {
+		return err
+	}
+
+	for port := range ports {
+		if _, exists := b.config.ExposedPorts[port]; !exists {
+			b.config.ExposedPorts[port] = struct{}{}
+		}
+	}
+	b.config.PortSpecs = nil
+
+	return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
+}
+
+func user(b *buildFile, args []string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("USER requires exactly one argument")
+	}
+
+	b.config.User = args[0]
+	return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args))
+}
+
+func volume(b *buildFile, args []string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("Volume cannot be empty")
+	}
+
+	volume := args
+
+	if b.config.Volumes == nil {
+		b.config.Volumes = map[string]struct{}{}
+	}
+	for _, v := range volume {
+		b.config.Volumes[v] = struct{}{}
+	}
+	if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func insert(b *buildFile, args []string) error {
+	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
+}
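
A side note on the env fix above: strings.Join takes the element slice first and the separator second, which is what the corrected call relies on. A minimal standalone check, purely illustrative and not part of the commit:

package main

import (
	"fmt"
	"strings"
)

func main() {
	key, value := "PATH", "/usr/local/bin"
	// strings.Join(elems []string, sep string): slice first, separator second.
	env := strings.Join([]string{key, value}, "=")
	fmt.Println(env) // prints: PATH=/usr/local/bin
}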

+ 108 - 47
builder/evaluator/evaluator.go

@@ -1,38 +1,54 @@
 package evaluator
 
 import (
+	"bytes"
+	"errors"
 	"fmt"
 	"io"
-	"regexp"
+	"io/ioutil"
+	"os"
+	"path"
 	"strings"
 
-	"github.com/erikh/buildfile/parser"
-
+	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/nat"
+	"github.com/docker/docker/pkg/tarsum"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
 )
 
 var (
-	evaluateTable = map[string]func(*buildFile, ...string) error{
-		"env":        env,
-		"maintainer": maintainer,
-		"add":        add,
-		"copy":       dispatchCopy, // copy() is a go builtin
-		//"onbuild":        parseMaybeJSON,
-		//"workdir":        parseString,
-		//"docker-version": parseString,
-		//"run":            parseMaybeJSON,
-		//"cmd":            parseMaybeJSON,
-		//"entrypoint":     parseMaybeJSON,
-		//"expose":         parseMaybeJSON,
-		//"volume":         parseMaybeJSON,
-	}
+	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
 )
 
+var evaluateTable map[string]func(*buildFile, []string) error
+
+func init() {
+	evaluateTable = map[string]func(*buildFile, []string) error{
+		"env":            env,
+		"maintainer":     maintainer,
+		"add":            add,
+		"copy":           dispatchCopy, // copy() is a go builtin
+		"from":           from,
+		"onbuild":        onbuild,
+		"workdir":        workdir,
+		"docker-version": nullDispatch, // we don't care about docker-version
+		"run":            run,
+		"cmd":            cmd,
+		"entrypoint":     entrypoint,
+		"expose":         expose,
+		"volume":         volume,
+		"user":           user,
+		"insert":         insert,
+	}
+}
+
+type envMap map[string]string
+type uniqueMap map[string]struct{}
+
 type buildFile struct {
 	dockerfile *parser.Node
 	env        envMap
@@ -40,48 +56,86 @@ type buildFile struct {
 	config     *runconfig.Config
 	options    *BuildOpts
 	maintainer string
+
+	// cmdSet indicates is CMD was set in current Dockerfile
+	cmdSet bool
+
+	context       *tarsum.TarSum
+	contextPath   string
+	tmpContainers uniqueMap
+	tmpImages     uniqueMap
 }
 
 type BuildOpts struct {
-	Daemon          *daemon.Daemon
-	Engine          *engine.Engine
-	OutStream       io.Writer
-	ErrStream       io.Writer
-	Verbose         bool
-	UtilizeCache    bool
-	Remove          bool
-	ForceRm         bool
+	Daemon         *daemon.Daemon
+	Engine         *engine.Engine
+	OutStream      io.Writer
+	ErrStream      io.Writer
+	Verbose        bool
+	UtilizeCache   bool
+	Remove         bool
+	ForceRemove    bool
+	AuthConfig     *registry.AuthConfig
+	AuthConfigFile *registry.ConfigFile
+
+	// Deprecated, original writer used for ImagePull. To be removed.
 	OutOld          io.Writer
 	StreamFormatter *utils.StreamFormatter
-	Auth            *registry.AuthConfig
-	AuthConfigFile  *registry.ConfigFile
 }
 
-func NewBuildFile(file io.ReadWriteCloser, opts *BuildOpts) (*buildFile, error) {
-	defer file.Close()
-	ast, err := parser.Parse(file)
-	if err != nil {
-		return nil, err
-	}
-
+func NewBuilder(opts *BuildOpts) (*buildFile, error) {
 	return &buildFile{
-		dockerfile: ast,
-		env:        envMap{},
-		config:     initRunConfig(),
-		options:    opts,
+		dockerfile:    nil,
+		env:           envMap{},
+		config:        initRunConfig(),
+		options:       opts,
+		tmpContainers: make(uniqueMap),
+		tmpImages:     make(uniqueMap),
 	}, nil
 }
 
-func (b *buildFile) Run() error {
-	node := b.dockerfile
+func (b *buildFile) Run(context io.Reader) (string, error) {
+	err := b.readContext(context)
+
+	if err != nil {
+		return "", err
+	}
+
+	filename := path.Join(b.contextPath, "Dockerfile")
+	if _, err := os.Stat(filename); os.IsNotExist(err) {
+		return "", fmt.Errorf("Cannot build a directory without a Dockerfile")
+	}
+	fileBytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return "", err
+	}
+	if len(fileBytes) == 0 {
+		return "", ErrDockerfileEmpty
+	}
+	ast, err := parser.Parse(bytes.NewReader(fileBytes))
+	if err != nil {
+		return "", err
+	}
+
+	b.dockerfile = ast
 
-	for i, n := range node.Children {
+	for i, n := range b.dockerfile.Children {
 		if err := b.dispatch(i, n); err != nil {
-			return err
+			if b.options.ForceRemove {
+				b.clearTmp(b.tmpContainers)
+			}
+			return "", err
+		} else if b.options.Remove {
+			b.clearTmp(b.tmpContainers)
 		}
 	}
 
-	return nil
+	if b.image == "" {
+		return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n")
+	}
+
+	fmt.Fprintf(b.options.OutStream, "Successfully built %s\n", utils.TruncateID(b.image))
+	return b.image, nil
 }
 
 func initRunConfig() *runconfig.Config {
@@ -94,7 +148,7 @@ func initRunConfig() *runconfig.Config {
 
 		// FIXME(erikh) this should also be a type in runconfig
 		Volumes:    map[string]struct{}{},
-		Entrypoint: []string{},
+		Entrypoint: []string{"/bin/sh", "-c"},
 		OnBuild:    []string{},
 	}
 }
@@ -102,17 +156,24 @@ func initRunConfig() *runconfig.Config {
 func (b *buildFile) dispatch(stepN int, ast *parser.Node) error {
 	cmd := ast.Value
 	strs := []string{}
+
+	if cmd == "onbuild" {
+		fmt.Fprintf(b.options.OutStream, "%#v\n", ast.Next.Children[0].Value)
+		ast = ast.Next.Children[0]
+		strs = append(strs, ast.Value)
+	}
+
 	for ast.Next != nil {
 		ast = ast.Next
-		strs = append(strs, replaceEnv(b, stripQuotes(ast.Value)))
+		strs = append(strs, replaceEnv(b, ast.Value))
 	}
 
-	fmt.Fprintf(b.outStream, "Step %d : %s\n", i, cmd, expression)
+	fmt.Fprintf(b.options.OutStream, "Step %d : %s %s\n", stepN, strings.ToUpper(cmd), strings.Join(strs, " "))
 
 	// XXX yes, we skip any cmds that are not valid; the parser should have
 	// picked these out already.
 	if f, ok := evaluateTable[cmd]; ok {
-		return f(b, strs...)
+		return f(b, strs)
 	}
 
 	return nil

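The new evaluator.go is built around table-driven dispatch: each lowercased Dockerfile instruction maps to a handler with the signature func(*buildFile, []string) error, and dispatch() looks the instruction up and passes it the argument slice. Below is a self-contained sketch of that pattern using a toy builder type; toyBuilder, table and dispatch are stand-ins for illustration only, not the real buildFile or evaluateTable:

package main

import (
	"fmt"
	"strings"
)

// toyBuilder stands in for the real buildFile; it only tracks ENV and MAINTAINER.
type toyBuilder struct {
	env        map[string]string
	maintainer string
}

// Handlers receive the whole argument slice, matching the []string
// signature this commit switches to (instead of variadic ...string).
var table = map[string]func(*toyBuilder, []string) error{
	"env": func(b *toyBuilder, args []string) error {
		if len(args) != 2 {
			return fmt.Errorf("ENV accepts two arguments")
		}
		b.env[args[0]] = args[1]
		return nil
	},
	"maintainer": func(b *toyBuilder, args []string) error {
		if len(args) != 1 {
			return fmt.Errorf("MAINTAINER requires only one argument")
		}
		b.maintainer = args[0]
		return nil
	},
}

// dispatch mirrors buildFile.dispatch: unknown instructions are simply skipped.
func dispatch(b *toyBuilder, cmd string, args []string) error {
	if f, ok := table[strings.ToLower(cmd)]; ok {
		return f(b, args)
	}
	return nil
}

func main() {
	b := &toyBuilder{env: map[string]string{}}
	fmt.Println(dispatch(b, "ENV", []string{"GOPATH", "/go"}), b.env)
	fmt.Println(dispatch(b, "MAINTAINER", []string{"erikh"}), b.maintainer)
}
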
+ 333 - 21
builder/evaluator/internals.go

@@ -1,6 +1,33 @@
 package evaluator
 
-func (b *buildFile) addContext(context io.Reader) (string, error) {
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon"
+	imagepkg "github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
+)
+
+func (b *buildFile) readContext(context io.Reader) error {
 	tmpdirPath, err := ioutil.TempDir("", "docker-build")
 	if err != nil {
 		return err
@@ -17,7 +44,7 @@ func (b *buildFile) addContext(context io.Reader) (string, error) {
 	}
 
 	b.contextPath = tmpdirPath
-	return tmpdirPath
+	return nil
 }
 
 func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
@@ -38,15 +65,15 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 			return nil
 		}
 
-		container, warnings, err := b.daemon.Create(b.config, "")
+		container, warnings, err := b.options.Daemon.Create(b.config, "")
 		if err != nil {
 			return err
 		}
 		for _, warning := range warnings {
-			fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning)
+			fmt.Fprintf(b.options.OutStream, " ---> [Warning] %s\n", warning)
 		}
 		b.tmpContainers[container.ID] = struct{}{}
-		fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
+		fmt.Fprintf(b.options.OutStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
 		id = container.ID
 
 		if err := container.Mount(); err != nil {
@@ -54,7 +81,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 		}
 		defer container.Unmount()
 	}
-	container := b.daemon.Get(id)
+	container := b.options.Daemon.Get(id)
 	if container == nil {
 		return fmt.Errorf("An error occured while creating the container")
 	}
@@ -63,7 +90,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 	autoConfig := *b.config
 	autoConfig.Cmd = autoCmd
 	// Commit the container
-	image, err := b.daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
+	image, err := b.options.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
 	if err != nil {
 		return err
 	}
@@ -72,24 +99,17 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 	return nil
 }
 
-func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error {
+func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
 	if b.context == nil {
 		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
 	}
-	tmp := strings.SplitN(args, " ", 2)
-	if len(tmp) != 2 {
-		return fmt.Errorf("Invalid %s format", cmdName)
-	}
 
-	orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t"))
-	if err != nil {
-		return err
+	if len(args) != 2 {
+		return fmt.Errorf("Invalid %s format", cmdName)
 	}
 
-	dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t"))
-	if err != nil {
-		return err
-	}
+	orig := args[0]
+	dest := args[1]
 
 	cmd := b.config.Cmd
 	b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)}
@@ -178,7 +198,7 @@ func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecomp
 	}
 
 	// Hash path and check the cache
-	if b.utilizeCache {
+	if b.options.UtilizeCache {
 		var (
 			hash string
 			sums = b.context.GetSums()
@@ -222,7 +242,7 @@ func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecomp
 	}
 
 	// Create the container
-	container, _, err := b.daemon.Create(b.config, "")
+	container, _, err := b.options.Daemon.Create(b.config, "")
 	if err != nil {
 		return err
 	}
@@ -245,3 +265,295 @@ func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecomp
 	}
 	return nil
 }
+
+func (b *buildFile) pullImage(name string) (*imagepkg.Image, error) {
+	remote, tag := parsers.ParseRepositoryTag(name)
+	pullRegistryAuth := b.options.AuthConfig
+	if len(b.options.AuthConfigFile.Configs) > 0 {
+		// The request came with a full auth config file, we prefer to use that
+		endpoint, _, err := registry.ResolveRepositoryName(remote)
+		if err != nil {
+			return nil, err
+		}
+		resolvedAuth := b.options.AuthConfigFile.ResolveAuthConfig(endpoint)
+		pullRegistryAuth = &resolvedAuth
+	}
+	job := b.options.Engine.Job("pull", remote, tag)
+	job.SetenvBool("json", b.options.StreamFormatter.Json())
+	job.SetenvBool("parallel", true)
+	job.SetenvJson("authConfig", pullRegistryAuth)
+	job.Stdout.Add(b.options.OutOld)
+	if err := job.Run(); err != nil {
+		return nil, err
+	}
+	image, err := b.options.Daemon.Repositories().LookupImage(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return image, nil
+}
+
+func (b *buildFile) processImageFrom(img *imagepkg.Image) error {
+	b.image = img.ID
+	b.config = &runconfig.Config{}
+	if img.Config != nil {
+		b.config = img.Config
+	}
+	if b.config.Env == nil || len(b.config.Env) == 0 {
+		b.config.Env = append(b.config.Env, "PATH="+daemon.DefaultPathEnv)
+	}
+	// Process ONBUILD triggers if they exist
+	if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
+		fmt.Fprintf(b.options.ErrStream, "# Executing %d build triggers\n", nTriggers)
+	}
+
+	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
+	onBuildTriggers := b.config.OnBuild
+	b.config.OnBuild = []string{}
+
+	// FIXME rewrite this so that builder/parser is used; right now steps in
+	// onbuild are muted because we have no good way to represent the step
+	// number
+	for _, step := range onBuildTriggers {
+		splitStep := strings.Split(step, " ")
+		stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " "))
+		switch stepInstruction {
+		case "ONBUILD":
+			return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step)
+		case "MAINTAINER", "FROM":
+			return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step)
+		}
+
+		// FIXME we have to run the evaluator manually here. This does not belong
+		// in this function.
+
+		if f, ok := evaluateTable[strings.ToLower(stepInstruction)]; ok {
+			if err := f(b, splitStep[1:]); err != nil {
+				return err
+			}
+		} else {
+			return fmt.Errorf("%s doesn't appear to be a valid Dockerfile instruction", splitStep[0])
+		}
+	}
+
+	return nil
+}
+
+// probeCache checks to see if image-caching is enabled (`b.options.UtilizeCache`)
+// and if so attempts to look up the current `b.image` and `b.config` pair
+// in the current server `b.options.Daemon`. If an image is found, probeCache returns
+// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
+// is any error, it returns `(false, err)`.
+func (b *buildFile) probeCache() (bool, error) {
+	if b.options.UtilizeCache {
+		if cache, err := b.options.Daemon.ImageGetCached(b.image, b.config); err != nil {
+			return false, err
+		} else if cache != nil {
+			fmt.Fprintf(b.options.OutStream, " ---> Using cache\n")
+			utils.Debugf("[BUILDER] Use cached version")
+			b.image = cache.ID
+			return true, nil
+		} else {
+			utils.Debugf("[BUILDER] Cache miss")
+		}
+	}
+	return false, nil
+}
+
+func (b *buildFile) create() (*daemon.Container, error) {
+	if b.image == "" {
+		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
+	}
+	b.config.Image = b.image
+
+	// Create the container
+	c, _, err := b.options.Daemon.Create(b.config, "")
+	if err != nil {
+		return nil, err
+	}
+	b.tmpContainers[c.ID] = struct{}{}
+	fmt.Fprintf(b.options.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
+
+	// override the entry point that may have been picked up from the base image
+	c.Path = b.config.Cmd[0]
+	c.Args = b.config.Cmd[1:]
+
+	return c, nil
+}
+
+func (b *buildFile) run(c *daemon.Container) error {
+	var errCh chan error
+	if b.options.Verbose {
+		errCh = utils.Go(func() error {
+			// FIXME: call the 'attach' job so that daemon.Attach can be made private
+			//
+			// FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach
+			// but without hijacking for stdin. Also, with attach there can be race
+			// condition because of some output already was printed before it.
+			return <-b.options.Daemon.Attach(c, nil, nil, b.options.OutStream, b.options.ErrStream)
+		})
+	}
+
+	//start the container
+	if err := c.Start(); err != nil {
+		return err
+	}
+
+	if errCh != nil {
+		if err := <-errCh; err != nil {
+			return err
+		}
+	}
+
+	// Wait for it to finish
+	if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 {
+		err := &utils.JSONError{
+			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
+			Code:    ret,
+		}
+		return err
+	}
+
+	return nil
+}
+
+func (b *buildFile) checkPathForAddition(orig string) error {
+	origPath := path.Join(b.contextPath, orig)
+	if p, err := filepath.EvalSymlinks(origPath); err != nil {
+		if os.IsNotExist(err) {
+			return fmt.Errorf("%s: no such file or directory", orig)
+		}
+		return err
+	} else {
+		origPath = p
+	}
+	if !strings.HasPrefix(origPath, b.contextPath) {
+		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
+	}
+	_, err := os.Stat(origPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return fmt.Errorf("%s: no such file or directory", orig)
+		}
+		return err
+	}
+	return nil
+}
+
+func (b *buildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
+	var (
+		err        error
+		destExists = true
+		origPath   = path.Join(b.contextPath, orig)
+		destPath   = path.Join(container.RootfsPath(), dest)
+	)
+
+	if destPath != container.RootfsPath() {
+		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
+		if err != nil {
+			return err
+		}
+	}
+
+	// Preserve the trailing '/'
+	if strings.HasSuffix(dest, "/") || dest == "." {
+		destPath = destPath + "/"
+	}
+
+	destStat, err := os.Stat(destPath)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+		destExists = false
+	}
+
+	fi, err := os.Stat(origPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return fmt.Errorf("%s: no such file or directory", orig)
+		}
+		return err
+	}
+
+	if fi.IsDir() {
+		return copyAsDirectory(origPath, destPath, destExists)
+	}
+
+	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
+	if decompress {
+		// First try to unpack the source as an archive
+		// to support the untar feature we need to clean up the path a little bit
+		// because tar is very forgiving.  First we need to strip off the archive's
+		// filename from the path but this is only added if it does not end in / .
+		tarDest := destPath
+		if strings.HasSuffix(tarDest, "/") {
+			tarDest = filepath.Dir(destPath)
+		}
+
+		// try to successfully untar the orig
+		if err := archive.UntarPath(origPath, tarDest); err == nil {
+			return nil
+		} else if err != io.EOF {
+			utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
+		}
+	}
+
+	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
+		return err
+	}
+	if err := archive.CopyWithTar(origPath, destPath); err != nil {
+		return err
+	}
+
+	resPath := destPath
+	if destExists && destStat.IsDir() {
+		resPath = path.Join(destPath, path.Base(origPath))
+	}
+
+	return fixPermissions(resPath, 0, 0)
+}
+
+func copyAsDirectory(source, destination string, destinationExists bool) error {
+	if err := archive.CopyWithTar(source, destination); err != nil {
+		return err
+	}
+
+	if destinationExists {
+		files, err := ioutil.ReadDir(source)
+		if err != nil {
+			return err
+		}
+
+		for _, file := range files {
+			if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	return fixPermissions(destination, 0, 0)
+}
+
+func fixPermissions(destination string, uid, gid int) error {
+	return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error {
+		if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) {
+			return err
+		}
+		return nil
+	})
+}
+
+func (b *buildFile) clearTmp(containers map[string]struct{}) {
+	for c := range containers {
+		tmp := b.options.Daemon.Get(c)
+		if err := b.options.Daemon.Destroy(tmp); err != nil {
+			fmt.Fprintf(b.options.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
+		} else {
+			delete(containers, c)
+			fmt.Fprintf(b.options.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
+		}
+	}
+}
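
checkPathForAddition above is what keeps ADD/COPY sources confined to the unpacked build context: the source is joined onto contextPath, symlinks are resolved, and anything resolving outside the context is rejected. The sketch below captures that idea with assumed paths; it skips the final stat the real code performs and is not the commit's implementation:

package main

import (
	"fmt"
	"path"
	"path/filepath"
	"strings"
)

// insideContext mimics the spirit of checkPathForAddition: resolve the
// requested source under the context root and reject anything that escapes it.
func insideContext(contextPath, orig string) error {
	origPath := path.Join(contextPath, orig)
	// The real code fails on a missing file here; for illustration we only
	// substitute the resolved path when EvalSymlinks succeeds.
	if p, err := filepath.EvalSymlinks(origPath); err == nil {
		origPath = p
	}
	if !strings.HasPrefix(origPath, contextPath) {
		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
	}
	return nil
}

func main() {
	fmt.Println(insideContext("/tmp/ctx", "Dockerfile"))    // <nil>
	fmt.Println(insideContext("/tmp/ctx", "../etc/passwd")) // forbidden path error
}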

+ 0 - 8
builder/evaluator/support.go

@@ -6,17 +6,9 @@ import (
 )
 
 var (
-	TOKEN_ESCAPED_QUOTE     = regexp.MustCompile(`\\"`)
-	TOKEN_ESCAPED_ESCAPE    = regexp.MustCompile(`\\\\`)
 	TOKEN_ENV_INTERPOLATION = regexp.MustCompile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)")
 )
 
-func stripQuotes(str string) string {
-	str = str[1 : len(str)-1]
-	str = TOKEN_ESCAPED_QUOTE.ReplaceAllString(str, `"`)
-	return TOKEN_ESCAPED_ESCAPE.ReplaceAllString(str, `\`)
-}
-
 func replaceEnv(b *buildFile, str string) string {
 	for _, match := range TOKEN_ENV_INTERPOLATION.FindAllString(str, -1) {
 		match = match[strings.Index(match, "$"):]

+ 1 - 1
builder/parser/dumper/main.go

@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/erikh/buildfile/parser"
+	"github.com/docker/docker/builder/parser"
 )
 
 func main() {

+ 17 - 8
builder/parser/line_parsers.go

@@ -27,13 +27,11 @@ func parseEnv(rest string) (*Node, error) {
 	node := blankNode()
 	rootnode := node
 	strs := TOKEN_WHITESPACE.Split(rest, 2)
-	node.Value = QuoteString(strs[0])
+	node.Value = strs[0]
 	node.Next = blankNode()
-	node.Next.Value = QuoteString(strs[1])
+	node.Next.Value = strs[1]
 
 	return rootnode, nil
-
-	return node, nil
 }
 
 // parses a whitespace-delimited set of arguments. The result is effectively a
@@ -41,18 +39,25 @@ func parseEnv(rest string) (*Node, error) {
 func parseStringsWhitespaceDelimited(rest string) (*Node, error) {
 	node := blankNode()
 	rootnode := node
+	prevnode := node
 	for _, str := range TOKEN_WHITESPACE.Split(rest, -1) { // use regexp
-		node.Value = QuoteString(str)
+		prevnode = node
+		node.Value = str
 		node.Next = blankNode()
 		node = node.Next
 	}
 
+	// XXX to get around regexp.Split *always* providing an empty string at the
+	// end due to how our loop is constructed, nil out the last node in the
+	// chain.
+	prevnode.Next = nil
+
 	return rootnode, nil
 }
 
 // parsestring just wraps the string in quotes and returns a working node.
 func parseString(rest string) (*Node, error) {
-	return &Node{QuoteString(rest), nil, nil}, nil
+	return &Node{rest, nil, nil}, nil
 }
 
 // parseJSON converts JSON arrays to an AST.
@@ -61,6 +66,7 @@ func parseJSON(rest string) (*Node, error) {
 		myJson   []interface{}
 		next     = blankNode()
 		orignext = next
+		prevnode = next
 	)
 
 	if err := json.Unmarshal([]byte(rest), &myJson); err != nil {
@@ -72,11 +78,14 @@ func parseJSON(rest string) (*Node, error) {
 		case float64:
 			str = strconv.FormatFloat(str.(float64), 'G', -1, 64)
 		}
-		next.Value = QuoteString(str.(string))
+		next.Value = str.(string)
 		next.Next = blankNode()
+		prevnode = next
 		next = next.Next
 	}
 
+	prevnode.Next = nil
+
 	return orignext, nil
 }
 
@@ -94,6 +103,6 @@ func parseMaybeJSON(rest string) (*Node, error) {
 	}
 
 	node := blankNode()
-	node.Value = QuoteString(rest)
+	node.Value = rest
 	return node, nil
 }

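The prevnode bookkeeping added to parseStringsWhitespaceDelimited and parseJSON is there because each loop iteration pre-allocates node.Next, so the chain would otherwise end in a spurious empty node; remembering the last real node and nil-ing its Next trims it. A toy reproduction of the fix, using a stand-in node type rather than the parser's real Node:

package main

import (
	"fmt"
	"regexp"
)

type node struct {
	value string
	next  *node
}

var whitespace = regexp.MustCompile(`[\t\v\f\r ]+`)

// build mirrors parseStringsWhitespaceDelimited: every iteration allocates
// node.next, so without the prevnode fix the list ends in an empty node.
func build(rest string) *node {
	root := &node{}
	n, prev := root, root
	for _, s := range whitespace.Split(rest, -1) {
		prev = n
		n.value = s
		n.next = &node{}
		n = n.next
	}
	prev.next = nil // drop the trailing empty node
	return root
}

func main() {
	for n := build("8500 8600 8400"); n != nil; n = n.next {
		fmt.Printf("%q ", n.value)
	}
	fmt.Println() // "8500" "8600" "8400"
}
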
+ 2 - 1
builder/parser/parser.go

@@ -43,7 +43,7 @@ type Node struct {
 
 var (
 	dispatch                map[string]func(string) (*Node, error)
-	TOKEN_WHITESPACE        = regexp.MustCompile(`\s+`)
+	TOKEN_WHITESPACE        = regexp.MustCompile(`[\t\v\f\r ]+`)
 	TOKEN_LINE_CONTINUATION = regexp.MustCompile(`\\$`)
 	TOKEN_COMMENT           = regexp.MustCompile(`^#.*$`)
 )
@@ -70,6 +70,7 @@ func init() {
 		"entrypoint":     parseMaybeJSON,
 		"expose":         parseStringsWhitespaceDelimited,
 		"volume":         parseMaybeJSON,
+		"insert":         parseIgnore,
 	}
 }
 

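On the TOKEN_WHITESPACE change: Go's \s class also matches newlines, whereas [\t\v\f\r ] does not, presumably so that argument text containing a newline is no longer split on it. A quick, illustrative comparison:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	oldWS := regexp.MustCompile(`\s+`)          // \s matches \n as well
	newWS := regexp.MustCompile(`[\t\v\f\r ]+`) // excludes \n
	s := "a\nb c"
	fmt.Printf("%q\n", oldWS.Split(s, -1)) // ["a" "b" "c"]
	fmt.Printf("%q\n", newWS.Split(s, -1)) // ["a\nb" "c"]
}
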
+ 1 - 1
builder/parser/testfiles/brimstone-docker-consul/result

@@ -1,5 +1,5 @@
 (from "brimstone/ubuntu:14.04")
-(cmd)
+(cmd "")
 (entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui")
 (expose "8500" "8600" "8400" "8301" "8302")
 (run "apt-get update && apt-get install -y unzip wget && apt-get clean && rm -rf /var/lib/apt/lists")

+ 1 - 1
builder/parser/utils.go

@@ -41,7 +41,7 @@ func (node *Node) Dump() string {
 			if len(n.Children) > 0 {
 				str += " " + n.Dump()
 			} else {
-				str += " " + n.Value
+				str += " " + QuoteString(n.Value)
 			}
 		}
 	}