2015-09-05 19:49:06 +00:00
|
|
|
package dockerfile
|
2014-08-05 20:17:40 +00:00
|
|
|
|
2014-08-07 05:56:44 +00:00
|
|
|
// internals for handling commands. Covers many areas and a lot of
|
|
|
|
// non-contiguous functionality. Please read the comments.
|
|
|
|
|
2014-08-05 22:41:09 +00:00
|
|
|
import (
|
|
|
|
"crypto/sha256"
|
|
|
|
"encoding/hex"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
2014-10-22 18:16:42 +00:00
|
|
|
"net/http"
|
2014-08-05 22:41:09 +00:00
|
|
|
"net/url"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2015-05-07 15:39:42 +00:00
|
|
|
"runtime"
|
2014-08-05 22:41:09 +00:00
|
|
|
"sort"
|
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
2015-03-26 22:22:04 +00:00
|
|
|
"github.com/Sirupsen/logrus"
|
2015-09-06 17:26:40 +00:00
|
|
|
"github.com/docker/docker/api"
|
|
|
|
"github.com/docker/docker/builder"
|
2015-09-05 19:49:06 +00:00
|
|
|
"github.com/docker/docker/builder/dockerfile/parser"
|
2015-11-12 19:55:17 +00:00
|
|
|
"github.com/docker/docker/container"
|
2014-08-05 22:41:09 +00:00
|
|
|
"github.com/docker/docker/daemon"
|
2015-07-20 17:57:15 +00:00
|
|
|
"github.com/docker/docker/image"
|
2014-09-30 06:23:36 +00:00
|
|
|
"github.com/docker/docker/pkg/archive"
|
2015-03-29 21:17:23 +00:00
|
|
|
"github.com/docker/docker/pkg/httputils"
|
2015-01-27 04:56:34 +00:00
|
|
|
"github.com/docker/docker/pkg/ioutils"
|
2015-03-18 02:18:41 +00:00
|
|
|
"github.com/docker/docker/pkg/jsonmessage"
|
2015-02-24 08:51:46 +00:00
|
|
|
"github.com/docker/docker/pkg/progressreader"
|
2015-09-06 17:26:40 +00:00
|
|
|
"github.com/docker/docker/pkg/streamformatter"
|
2015-03-24 11:25:26 +00:00
|
|
|
"github.com/docker/docker/pkg/stringid"
|
2015-08-28 15:29:10 +00:00
|
|
|
"github.com/docker/docker/pkg/stringutils"
|
2014-08-05 22:41:09 +00:00
|
|
|
"github.com/docker/docker/pkg/system"
|
|
|
|
"github.com/docker/docker/pkg/tarsum"
|
2014-11-24 23:47:42 +00:00
|
|
|
"github.com/docker/docker/pkg/urlutil"
|
2015-02-06 14:33:01 +00:00
|
|
|
"github.com/docker/docker/runconfig"
|
2014-08-05 22:41:09 +00:00
|
|
|
)
|
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
// commit commits the current build state as a new image and stores the new
// image ID in b.image. autoCmd is the Cmd that is recorded in the committed
// image's config; comment is embedded in a synthetic no-op command used for
// cache probing. If id is empty, a throwaway container is created (after
// consulting the build cache) solely to be committed; otherwise the given
// container id is committed as-is.
func (b *Builder) commit(id string, autoCmd *stringutils.StrSlice, comment string) error {
	// NOTE(review): disableCommit looks like a test/debug switch that turns
	// commits into no-ops — confirm against Builder's declaration.
	if b.disableCommit {
		return nil
	}
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.runConfig.Image = b.image
	if id == "" {
		// No container supplied: temporarily replace Cmd with a platform
		// appropriate no-op marker so the cache lookup and the created
		// container reflect this metadata-only step.
		cmd := b.runConfig.Cmd
		if runtime.GOOS != "windows" {
			b.runConfig.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", "#(nop) "+comment)
		} else {
			b.runConfig.Cmd = stringutils.NewStrSlice("cmd", "/S /C", "REM (nop) "+comment)
		}
		// Restore the caller-visible Cmd when this function returns.
		defer func(cmd *stringutils.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

		// A cache hit means an equivalent image already exists; probeCache
		// has already updated b.image, so there is nothing to commit.
		if hit, err := b.probeCache(); err != nil {
			return err
		} else if hit {
			return nil
		}
		container, err := b.create()
		if err != nil {
			return err
		}
		id = container.ID

		if err := b.docker.Mount(container); err != nil {
			return err
		}
		defer b.docker.Unmount(container)
	}

	// Note: Actually copy the struct
	autoConfig := *b.runConfig
	autoConfig.Cmd = autoCmd

	commitCfg := &daemon.ContainerCommitConfig{
		Author: b.maintainer,
		Pause:  true,
		Config: &autoConfig,
	}

	// Commit the container
	imageID, err := b.docker.Commit(id, commitCfg)
	if err != nil {
		return err
	}
	// Retain the new image so it is not garbage-collected while the build
	// is still in progress, and advance the build to it.
	b.docker.Retain(b.id, imageID)
	b.activeImages = append(b.activeImages, imageID)
	b.image = imageID
	return nil
}
|
|
|
|
|
2014-09-16 16:58:20 +00:00
|
|
|
// copyInfo pairs a source file's metadata with the instruction-specific
// decision of whether that source should be decompressed when copied into
// the container (ADD decompresses local archives, COPY does not).
type copyInfo struct {
	builder.FileInfo
	// decompress indicates the source archive should be unpacked at the
	// destination rather than copied verbatim.
	decompress bool
}
|
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
// runContextCommand implements the shared machinery behind ADD and COPY:
// it resolves every source argument (local context paths, wildcards, and —
// when allowRemote is true — URLs) into copyInfo entries, probes the build
// cache using a hash derived from the sources, and on a cache miss creates a
// container, copies each source to dest, and commits the result.
// allowLocalDecompression controls whether local archives are unpacked at
// the destination (true for ADD). cmdName is used only in messages.
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	b.runConfig.Image = b.image

	var infos []copyInfo

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	var err error
	for _, orig := range args[0 : len(args)-1] {
		var fi builder.FileInfo
		decompress := allowLocalDecompression
		if urlutil.IsURL(orig) {
			if !allowRemote {
				return fmt.Errorf("Source can't be a URL for %s", cmdName)
			}
			fi, err = b.download(orig)
			if err != nil {
				return err
			}
			// The downloaded file lives in its own temp dir; clean it up
			// when this whole command finishes (note: defer in a loop runs
			// at function return, which is the intent here).
			defer os.RemoveAll(filepath.Dir(fi.Path()))
			// Remote sources are never decompressed, per Dockerfile spec.
			decompress = false
			infos = append(infos, copyInfo{fi, decompress})
			continue
		}
		// not a URL
		subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true)
		if err != nil {
			return err
		}

		infos = append(infos, subInfos...)
	}

	if len(infos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one info then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(infos) == 1 {
		fi := infos[0].FileInfo
		origPaths = fi.Name()
		if hfi, ok := fi.(builder.Hashed); ok {
			srcHash = hfi.Hash()
		}
	} else {
		var hashs []string
		var origs []string
		for _, info := range infos {
			fi := info.FileInfo
			origs = append(origs, fi.Name())
			if hfi, ok := fi.(builder.Hashed); ok {
				hashs = append(hashs, hfi.Hash())
			}
		}
		// Combine all per-source hashes into a single cache key.
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	// Swap in a synthetic no-op Cmd that encodes the source hash, so the
	// cache probe below is keyed on the actual content being copied.
	cmd := b.runConfig.Cmd
	if runtime.GOOS != "windows" {
		b.runConfig.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	} else {
		b.runConfig.Cmd = stringutils.NewStrSlice("cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
	}
	defer func(cmd *stringutils.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

	if hit, err := b.probeCache(); err != nil {
		return err
	} else if hit {
		return nil
	}

	container, _, err := b.docker.Create(b.runConfig, nil)
	if err != nil {
		return err
	}
	defer b.docker.Unmount(container)
	// Track the container so clearTmp can remove it after the build.
	b.tmpContainers[container.ID] = struct{}{}

	comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)

	// Twiddle the destination when its a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !system.IsAbs(dest) {
		hasSlash := strings.HasSuffix(dest, string(os.PathSeparator))
		dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest)

		// Make sure we preserve any trailing slash
		if hasSlash {
			dest += string(os.PathSeparator)
		}
	}

	for _, info := range infos {
		if err := b.docker.Copy(container, dest, info.FileInfo, info.decompress); err != nil {
			return err
		}
	}

	// Commit with the original (pre-no-op) Cmd restored in the image config.
	if err := b.commit(container.ID, cmd, comment); err != nil {
		return err
	}
	return nil
}
|
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
|
|
|
|
// get filename from URL
|
|
|
|
u, err := url.Parse(srcURL)
|
|
|
|
if err != nil {
|
|
|
|
return
|
2014-09-22 13:41:02 +00:00
|
|
|
}
|
2015-10-07 22:57:37 +00:00
|
|
|
path := filepath.FromSlash(u.Path) // Ensure in platform semantics
|
2015-09-06 17:26:40 +00:00
|
|
|
if strings.HasSuffix(path, string(os.PathSeparator)) {
|
|
|
|
path = path[:len(path)-1]
|
|
|
|
}
|
|
|
|
parts := strings.Split(path, string(os.PathSeparator))
|
|
|
|
filename := parts[len(parts)-1]
|
|
|
|
if filename == "" {
|
|
|
|
err = fmt.Errorf("cannot determine filename from url: %s", u)
|
|
|
|
return
|
2014-12-12 18:32:11 +00:00
|
|
|
}
|
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
// Initiate the download
|
|
|
|
resp, err := httputils.Download(srcURL)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
2014-08-05 20:17:40 +00:00
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
// Prepare file in a tmp dir
|
|
|
|
tmpDir, err := ioutils.TempDir("", "docker-remote")
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
defer func() {
|
2014-08-05 20:17:40 +00:00
|
|
|
if err != nil {
|
2015-09-06 17:26:40 +00:00
|
|
|
os.RemoveAll(tmpDir)
|
2014-08-05 20:17:40 +00:00
|
|
|
}
|
2015-09-06 17:26:40 +00:00
|
|
|
}()
|
|
|
|
tmpFileName := filepath.Join(tmpDir, filename)
|
|
|
|
tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
2014-08-05 20:17:40 +00:00
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
// Download and dump result to tmp file
|
|
|
|
if _, err = io.Copy(tmpFile, progressreader.New(progressreader.Config{
|
|
|
|
In: resp.Body,
|
|
|
|
// TODO: make progressreader streamformatter agnostic
|
|
|
|
Out: b.Stdout.(*streamformatter.StdoutFormatter).Writer,
|
|
|
|
Formatter: b.Stdout.(*streamformatter.StdoutFormatter).StreamFormatter,
|
|
|
|
Size: resp.ContentLength,
|
|
|
|
NewLines: true,
|
|
|
|
ID: "",
|
|
|
|
Action: "Downloading",
|
|
|
|
})); err != nil {
|
2014-08-05 20:17:40 +00:00
|
|
|
tmpFile.Close()
|
2015-09-06 17:26:40 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
fmt.Fprintln(b.Stdout)
|
|
|
|
// ignoring error because the file was already opened successfully
|
|
|
|
tmpFileSt, err := tmpFile.Stat()
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
tmpFile.Close()
|
2014-08-05 20:17:40 +00:00
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
// Set the mtime to the Last-Modified header value if present
|
|
|
|
// Otherwise just remove atime and mtime
|
|
|
|
mTime := time.Time{}
|
2014-10-22 18:16:42 +00:00
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
lastMod := resp.Header.Get("Last-Modified")
|
|
|
|
if lastMod != "" {
|
|
|
|
// If we can't parse it then just let it default to 'zero'
|
|
|
|
// otherwise use the parsed time value
|
|
|
|
if parsedMTime, err := http.ParseTime(lastMod); err == nil {
|
|
|
|
mTime = parsedMTime
|
2014-10-22 18:16:42 +00:00
|
|
|
}
|
2015-09-06 17:26:40 +00:00
|
|
|
}
|
2014-10-22 18:16:42 +00:00
|
|
|
|
2015-11-05 19:47:18 +00:00
|
|
|
if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
|
2015-09-06 17:26:40 +00:00
|
|
|
return
|
|
|
|
}
|
2014-08-05 20:17:40 +00:00
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
// Calc the checksum, even if we're using the cache
|
|
|
|
r, err := archive.Tar(tmpFileName, archive.Uncompressed)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if _, err = io.Copy(ioutil.Discard, tarSum); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
hash := tarSum.Sum(nil)
|
|
|
|
r.Close()
|
|
|
|
return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil
|
|
|
|
}
|
2014-08-05 20:17:40 +00:00
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
// calcCopyInfo resolves a single ADD/COPY source path inside the build
// context into one or more copyInfo entries, tagging each FileInfo with a
// content hash ("file:..." for files, "dir:..." for directories) used for
// build-cache lookups. Wildcard patterns are expanded by walking the whole
// context and recursing (with wildcards disabled) on each match.
func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) {

	// Work in daemon-specific OS filepath semantics
	origPath = filepath.FromSlash(origPath)

	// Normalize away a leading separator and a leading "./" so paths are
	// always context-relative.
	if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath) {
		var copyInfos []copyInfo
		if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info.Name() == "" {
				// Why are we doing this check?
				return nil
			}
			if match, _ := filepath.Match(origPath, path); !match {
				return nil
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false)
			if err != nil {
				return err
			}
			copyInfos = append(copyInfos, subInfos...)
			return nil
		}); err != nil {
			return nil, err
		}
		return copyInfos, nil
	}

	// Must be a dir or a file

	statPath, fi, err := b.context.Stat(origPath)
	if err != nil {
		return nil, err
	}

	copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}}

	// If the context doesn't provide hashes there is nothing more to do.
	hfi, handleHash := fi.(builder.Hashed)
	if !handleHash {
		return copyInfos, nil
	}

	// Deal with the single file case
	if !fi.IsDir() {
		hfi.SetHash("file:" + hfi.Hash())
		return copyInfos, nil
	}
	// Must be a dir
	// Hash a directory as the combined hash of all files beneath it.
	var subfiles []string
	err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// we already checked handleHash above
		subfiles = append(subfiles, info.(builder.Hashed).Hash())
		return nil
	})
	if err != nil {
		return nil, err
	}

	// Sort for a deterministic hash regardless of walk order.
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil)))

	return copyInfos, nil
}
|
2014-08-05 22:41:09 +00:00
|
|
|
|
2015-07-22 05:29:03 +00:00
|
|
|
// containsWildcards reports whether name holds an unescaped glob
// metacharacter ('*', '?' or '['). A backslash escapes the byte that
// follows it, so `\*` does not count as a wildcard.
func containsWildcards(name string) bool {
	for idx := 0; idx < len(name); idx++ {
		switch name[idx] {
		case '\\':
			// Skip the escaped byte entirely.
			idx++
		case '*', '?', '[':
			return true
		}
	}
	return false
}
|
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
// processImageFrom initializes the builder state from a FROM image: it adopts
// the image's ID and config, ensures a default PATH environment variable on
// platforms that have one, and then parses and dispatches any ONBUILD
// triggers stored in the image, clearing them from the config so they are not
// re-committed into the new image.
func (b *Builder) processImageFrom(img *image.Image) error {
	b.image = img.ID().String()

	if img.Config != nil {
		b.runConfig = img.Config
	}

	// The default path will be blank on Windows (set by HCS)
	if len(b.runConfig.Env) == 0 && container.DefaultPathEnv != "" {
		b.runConfig.Env = append(b.runConfig.Env, "PATH="+container.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 {
		word := "trigger"
		if nTriggers > 1 {
			word = "triggers"
		}
		fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.runConfig.OnBuild
	b.runConfig.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for _, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			// Some instructions are forbidden inside ONBUILD triggers.
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			// Execute the trigger as if it were a regular Dockerfile step.
			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}
|
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
// probeCache checks if `b.docker` implements builder.ImageCache and image-caching
|
|
|
|
// is enabled (`b.UseCache`).
|
|
|
|
// If so attempts to look up the current `b.image` and `b.runConfig` pair with `b.docker`.
|
|
|
|
// If an image is found, probeCache returns `(true, nil)`.
|
|
|
|
// If no image is found, it returns `(false, nil)`.
|
|
|
|
// If there is any error, it returns `(false, err)`.
|
|
|
|
func (b *Builder) probeCache() (bool, error) {
|
|
|
|
c, ok := b.docker.(builder.ImageCache)
|
|
|
|
if !ok || !b.UseCache || b.cacheBusted {
|
2015-02-25 18:27:32 +00:00
|
|
|
return false, nil
|
|
|
|
}
|
2015-09-06 17:26:40 +00:00
|
|
|
cache, err := c.GetCachedImage(b.image, b.runConfig)
|
2015-02-25 18:27:32 +00:00
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
2015-09-06 17:26:40 +00:00
|
|
|
if len(cache) == 0 {
|
2015-10-06 12:43:51 +00:00
|
|
|
logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd)
|
2015-02-25 18:27:32 +00:00
|
|
|
b.cacheBusted = true
|
|
|
|
return false, nil
|
2014-08-05 22:41:09 +00:00
|
|
|
}
|
2015-02-25 18:27:32 +00:00
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
fmt.Fprintf(b.Stdout, " ---> Using cache\n")
|
2015-10-06 12:43:51 +00:00
|
|
|
logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd)
|
2015-09-06 17:26:40 +00:00
|
|
|
b.image = string(cache)
|
|
|
|
|
|
|
|
// TODO: remove once Commit can take a tag parameter.
|
|
|
|
b.docker.Retain(b.id, b.image)
|
|
|
|
b.activeImages = append(b.activeImages, b.image)
|
|
|
|
|
2015-02-25 18:27:32 +00:00
|
|
|
return true, nil
|
2014-08-05 22:41:09 +00:00
|
|
|
}
|
|
|
|
|
2015-11-12 19:55:17 +00:00
|
|
|
// create builds a host config from the builder's resource limits and creates
// (but does not start) a container from the current b.runConfig. The new
// container is registered in b.tmpContainers for later cleanup, and its
// Path/Args are overridden from the config's Cmd so the base image's
// entrypoint does not take precedence.
func (b *Builder) create() (*container.Container, error) {
	if b.image == "" && !b.noBaseImage {
		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.runConfig.Image = b.image

	// Map the builder's per-build resource flags onto the container's
	// resource limits.
	resources := runconfig.Resources{
		CgroupParent: b.CgroupParent,
		CPUShares:    b.CPUShares,
		CPUPeriod:    b.CPUPeriod,
		CPUQuota:     b.CPUQuota,
		CpusetCpus:   b.CPUSetCpus,
		CpusetMems:   b.CPUSetMems,
		Memory:       b.Memory,
		MemorySwap:   b.MemorySwap,
		Ulimits:      b.Ulimits,
	}

	// TODO: why not embed a hostconfig in builder?
	hostConfig := &runconfig.HostConfig{
		Isolation: b.Isolation,
		ShmSize:   b.ShmSize,
		Resources: resources,
	}

	// Snapshot the config so the Cmd used below is not affected by any
	// later mutation of b.runConfig.
	config := *b.runConfig

	// Create the container
	c, warnings, err := b.docker.Create(b.runConfig, hostConfig)
	if err != nil {
		return nil, err
	}
	// NOTE(review): this unmounts when create() returns even though the
	// container is returned to the caller — presumably Create leaves the
	// container mounted and later steps re-mount as needed; confirm against
	// the Docker backend's Create/Mount contract.
	defer b.docker.Unmount(c)
	for _, warning := range warnings {
		fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning)
	}

	// Track for clearTmp and announce the intermediate container.
	b.tmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID))

	if config.Cmd.Len() > 0 {
		// override the entry point that may have been picked up from the base image
		s := config.Cmd.Slice()
		c.Path = s[0]
		c.Args = s[1:]
	}

	return c, nil
}
|
|
|
|
|
2015-11-12 19:55:17 +00:00
|
|
|
// run starts container c, streams its output when the build is verbose,
// supports cancellation via b.cancelled (killing and removing the container),
// and waits for the container to exit. A non-zero exit status is surfaced as
// a jsonmessage.JSONError carrying the command and exit code.
func (b *Builder) run(c *container.Container) error {
	// errCh stays nil when not verbose; it is only ever read inside the
	// matching `if b.Verbose` block below.
	var errCh chan error
	if b.Verbose {
		errCh = c.Attach(nil, b.Stdout, b.Stderr)
	}

	//start the container
	if err := b.docker.Start(c); err != nil {
		return err
	}

	// Watch for build cancellation for the lifetime of this function; the
	// deferred close(finished) releases the goroutine on normal completion.
	finished := make(chan struct{})
	defer close(finished)
	go func() {
		select {
		case <-b.cancelled:
			logrus.Debugln("Build cancelled, killing and removing container:", c.ID)
			b.docker.Kill(c)
			b.removeContainer(c.ID)
		case <-finished:
		}
	}()

	if b.Verbose {
		// Block on reading output from container, stop on err or chan closed
		if err := <-errCh; err != nil {
			return err
		}
	}

	// Wait for it to finish
	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
		// TODO: change error type, because jsonmessage.JSONError assumes HTTP
		return &jsonmessage.JSONError{
			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.runConfig.Cmd.ToString(), ret),
			Code:    ret,
		}
	}

	return nil
}
|
|
|
|
|
2015-10-16 09:18:10 +00:00
|
|
|
func (b *Builder) removeContainer(c string) error {
|
|
|
|
rmConfig := &daemon.ContainerRmConfig{
|
|
|
|
ForceRemove: true,
|
|
|
|
RemoveVolume: true,
|
|
|
|
}
|
|
|
|
if err := b.docker.Remove(c, rmConfig); err != nil {
|
|
|
|
fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
func (b *Builder) clearTmp() {
|
|
|
|
for c := range b.tmpContainers {
|
2015-10-16 09:18:10 +00:00
|
|
|
if err := b.removeContainer(c); err != nil {
|
2015-09-06 17:26:40 +00:00
|
|
|
return
|
2014-08-05 22:41:09 +00:00
|
|
|
}
|
2015-09-06 17:26:40 +00:00
|
|
|
delete(b.tmpContainers, c)
|
|
|
|
fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c))
|
2014-08-05 22:41:09 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
// readDockerfile reads a Dockerfile from the current context.
// It resolves the Dockerfile name (falling back from "Dockerfile" to a
// lowercase variant when the default is absent), rejects missing or empty
// files, parses the contents into b.dockerfile, and finally lets the context
// drop the Dockerfile/.dockerignore files that were only sent for parsing.
func (b *Builder) readDockerfile() error {
	// If no -f was specified then look for 'Dockerfile'. If we can't find
	// that then look for 'dockerfile'. If neither are found then default
	// back to 'Dockerfile' and use that in the error message.
	if b.DockerfileName == "" {
		b.DockerfileName = api.DefaultDockerfileName
		if _, _, err := b.context.Stat(b.DockerfileName); os.IsNotExist(err) {
			lowercase := strings.ToLower(b.DockerfileName)
			if _, _, err := b.context.Stat(lowercase); err == nil {
				b.DockerfileName = lowercase
			}
		}
	}

	f, err := b.context.Open(b.DockerfileName)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.DockerfileName)
		}
		return err
	}
	// The size check is only possible when the context hands us a real file.
	if f, ok := f.(*os.File); ok {
		// ignoring error because Open already succeeded
		fi, err := f.Stat()
		if err != nil {
			return fmt.Errorf("Unexpected error reading Dockerfile: %v", err)
		}
		if fi.Size() == 0 {
			return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.DockerfileName)
		}
	}
	b.dockerfile, err = parser.Parse(f)
	f.Close()
	if err != nil {
		return err
	}

	// After the Dockerfile has been parsed, we need to check the .dockerignore
	// file for either "Dockerfile" or ".dockerignore", and if either are
	// present then erase them from the build context. These files should never
	// have been sent from the client but we did send them to make sure that
	// we had the Dockerfile to actually parse, and then we also need the
	// .dockerignore file to know whether either file should be removed.
	// Note that this assumes the Dockerfile has been read into memory and
	// is now safe to be removed.
	if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok {
		dockerIgnore.Process([]string{b.DockerfileName})
	}
	return nil
}
|
|
|
|
|
2015-09-06 17:26:40 +00:00
|
|
|
// determine if build arg is part of built-in args or user
|
|
|
|
// defined args in Dockerfile at any point in time.
|
|
|
|
func (b *Builder) isBuildArgAllowed(arg string) bool {
|
|
|
|
if _, ok := BuiltinAllowedBuildArgs[arg]; ok {
|
|
|
|
return true
|
2014-08-05 22:41:09 +00:00
|
|
|
}
|
2015-09-06 17:26:40 +00:00
|
|
|
if _, ok := b.allowedBuildArgs[arg]; ok {
|
|
|
|
return true
|
2014-08-05 22:41:09 +00:00
|
|
|
}
|
2015-09-06 17:26:40 +00:00
|
|
|
return false
|
2014-08-05 22:41:09 +00:00
|
|
|
}
|