2014-08-15 16:29:35 +00:00
|
|
|
package builder
|
2014-08-05 20:17:40 +00:00
|
|
|
|
2014-08-07 05:56:44 +00:00
|
|
|
// internals for handling commands. Covers many areas and a lot of
|
|
|
|
// non-contiguous functionality. Please read the comments.
|
|
|
|
|
2014-08-05 22:41:09 +00:00
|
|
|
import (
|
|
|
|
"crypto/sha256"
|
|
|
|
"encoding/hex"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
2014-10-22 18:16:42 +00:00
|
|
|
"net/http"
|
2014-08-05 22:41:09 +00:00
|
|
|
"net/url"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2015-05-07 15:39:42 +00:00
|
|
|
"runtime"
|
2014-08-05 22:41:09 +00:00
|
|
|
"sort"
|
|
|
|
"strings"
|
|
|
|
"syscall"
|
|
|
|
"time"
|
|
|
|
|
2015-03-26 22:22:04 +00:00
|
|
|
"github.com/Sirupsen/logrus"
|
2014-10-13 20:14:35 +00:00
|
|
|
"github.com/docker/docker/builder/parser"
|
2015-06-23 19:58:17 +00:00
|
|
|
"github.com/docker/docker/cliconfig"
|
2015-09-10 22:01:18 +00:00
|
|
|
"github.com/docker/docker/context"
|
2014-08-05 22:41:09 +00:00
|
|
|
"github.com/docker/docker/daemon"
|
2015-04-15 11:43:15 +00:00
|
|
|
"github.com/docker/docker/graph"
|
2015-07-20 17:57:15 +00:00
|
|
|
"github.com/docker/docker/image"
|
2014-09-30 06:23:36 +00:00
|
|
|
"github.com/docker/docker/pkg/archive"
|
2014-10-29 19:06:51 +00:00
|
|
|
"github.com/docker/docker/pkg/chrootarchive"
|
2015-03-29 21:17:23 +00:00
|
|
|
"github.com/docker/docker/pkg/httputils"
|
2015-01-27 04:56:34 +00:00
|
|
|
"github.com/docker/docker/pkg/ioutils"
|
2015-03-18 02:18:41 +00:00
|
|
|
"github.com/docker/docker/pkg/jsonmessage"
|
2014-08-05 22:41:09 +00:00
|
|
|
"github.com/docker/docker/pkg/parsers"
|
2015-02-24 08:51:46 +00:00
|
|
|
"github.com/docker/docker/pkg/progressreader"
|
2015-03-24 11:25:26 +00:00
|
|
|
"github.com/docker/docker/pkg/stringid"
|
2015-08-28 15:29:10 +00:00
|
|
|
"github.com/docker/docker/pkg/stringutils"
|
2015-08-24 21:07:22 +00:00
|
|
|
"github.com/docker/docker/pkg/symlink"
|
2014-08-05 22:41:09 +00:00
|
|
|
"github.com/docker/docker/pkg/system"
|
|
|
|
"github.com/docker/docker/pkg/tarsum"
|
2014-11-24 23:47:42 +00:00
|
|
|
"github.com/docker/docker/pkg/urlutil"
|
2015-04-22 12:06:58 +00:00
|
|
|
"github.com/docker/docker/registry"
|
2015-02-06 14:33:01 +00:00
|
|
|
"github.com/docker/docker/runconfig"
|
2014-08-05 22:41:09 +00:00
|
|
|
)
|
|
|
|
|
2015-07-22 05:29:03 +00:00
|
|
|
func (b *builder) readContext(context io.Reader) (err error) {
|
2015-08-24 21:07:22 +00:00
|
|
|
tmpdirPath, err := getTempDir("", "docker-build")
|
2014-08-05 20:17:40 +00:00
|
|
|
if err != nil {
|
2015-07-15 13:04:35 +00:00
|
|
|
return
|
2014-08-05 20:17:40 +00:00
|
|
|
}
|
|
|
|
|
2015-07-15 13:04:35 +00:00
|
|
|
// Make sure we clean-up upon error. In the happy case the caller
|
|
|
|
// is expected to manage the clean-up
|
|
|
|
defer func() {
|
|
|
|
if err != nil {
|
|
|
|
if e := os.RemoveAll(tmpdirPath); e != nil {
|
|
|
|
logrus.Debugf("[BUILDER] failed to remove temporary context: %s", e)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
2014-08-05 20:17:40 +00:00
|
|
|
decompressedStream, err := archive.DecompressStream(context)
|
|
|
|
if err != nil {
|
2015-07-15 13:04:35 +00:00
|
|
|
return
|
2014-08-05 20:17:40 +00:00
|
|
|
}
|
|
|
|
|
2015-04-02 17:42:40 +00:00
|
|
|
if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version1); err != nil {
|
2015-07-15 13:04:35 +00:00
|
|
|
return
|
2014-08-21 20:12:52 +00:00
|
|
|
}
|
2014-10-29 19:06:51 +00:00
|
|
|
|
2015-07-15 13:04:35 +00:00
|
|
|
if err = chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
|
|
|
|
return
|
2014-08-05 20:17:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
b.contextPath = tmpdirPath
|
2015-07-15 13:04:35 +00:00
|
|
|
return
|
2014-08-05 20:17:40 +00:00
|
|
|
}
|
|
|
|
|
2015-09-10 22:01:18 +00:00
|
|
|
// commit records the current build state as a new image layer and advances
// b.image to it. When id is empty a throwaway container is created (or a
// cached image is reused, short-circuiting the commit) with a "#(nop)"
// command describing the step; otherwise the given container id is
// committed directly. autoCmd becomes the committed config's Cmd and
// comment becomes the nop command text. Returns nil without doing anything
// when b.disableCommit is set.
func (b *builder) commit(ctx context.Context, id string, autoCmd *stringutils.StrSlice, comment string) error {
	if b.disableCommit {
		return nil
	}
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.Config.Image = b.image
	if id == "" {
		// No container supplied: synthesize a no-op command recording this
		// step, and restore the real Cmd when we return.
		cmd := b.Config.Cmd
		if runtime.GOOS != "windows" {
			b.Config.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", "#(nop) "+comment)
		} else {
			b.Config.Cmd = stringutils.NewStrSlice("cmd", "/S /C", "REM (nop) "+comment)
		}
		defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)

		// A cache hit means an equivalent image already exists; probeCache
		// has already pointed b.image at it, so there is nothing to commit.
		hit, err := b.probeCache(ctx)
		if err != nil {
			return err
		}
		if hit {
			return nil
		}

		container, err := b.create(ctx)
		if err != nil {
			return err
		}
		id = container.ID

		if err := container.Mount(ctx); err != nil {
			return err
		}
		defer container.Unmount(ctx)
	}
	container, err := b.Daemon.Get(ctx, id)
	if err != nil {
		return err
	}

	// Note: Actually copy the struct
	autoConfig := *b.Config
	autoConfig.Cmd = autoCmd

	commitCfg := &daemon.ContainerCommitConfig{
		Author: b.maintainer,
		Pause:  true,
		Config: &autoConfig,
	}

	// Commit the container
	image, err := b.Daemon.Commit(ctx, container, commitCfg)
	if err != nil {
		return err
	}
	// Retain the new image so concurrent graph GC cannot remove it while
	// this build is still running; released via b.activeImages bookkeeping.
	b.Daemon.Graph(ctx).Retain(b.id, image.ID)
	b.activeImages = append(b.activeImages, image.ID)
	b.image = image.ID
	return nil
}
|
|
|
|
|
2014-09-16 16:58:20 +00:00
|
|
|
// copyInfo describes one resolved ADD/COPY source: where it came from,
// where it is going, and the hash used for build-cache lookups.
type copyInfo struct {
	origPath   string // source path relative to the build context (or tmp download)
	destPath   string // destination path inside the container
	hash       string // cache key: "file:...", "dir:...", or the raw origPath for URLs
	decompress bool   // whether the source may be untarred into place (ADD semantics)
	tmpDir     string // non-empty for remote sources: temp dir to remove after the copy
}
|
|
|
|
|
2015-09-10 22:01:18 +00:00
|
|
|
// runContextCommand implements ADD and COPY: it resolves every source in
// args (all but the last element) into copyInfo records, derives a cache
// key from their hashes, and — on a cache miss — creates a temporary
// container, copies the sources in, and commits it. cmdName ("ADD"/"COPY")
// is used in messages; allowRemote permits URL sources and
// allowDecompression permits auto-untarring of archives.
func (b *builder) runContextCommand(ctx context.Context, args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	copyInfos := []*copyInfo{}

	b.Config.Image = b.image

	// Remote sources are downloaded into per-source temp dirs; always
	// remove them once the copy (or the failure) is done.
	defer func() {
		for _, ci := range copyInfos {
			if ci.tmpDir != "" {
				os.RemoveAll(ci.tmpDir)
			}
		}
	}()

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	for _, orig := range args[0 : len(args)-1] {
		if err := calcCopyInfo(
			b,
			cmdName,
			&copyInfos,
			orig,
			dest,
			allowRemote,
			allowDecompression,
			true,
		); err != nil {
			return err
		}
	}

	if len(copyInfos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(copyInfos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one CI then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(copyInfos) == 1 {
		srcHash = copyInfos[0].hash
		origPaths = copyInfos[0].origPath
	} else {
		var hashs []string
		var origs []string
		for _, ci := range copyInfos {
			hashs = append(hashs, ci.hash)
			origs = append(origs, ci.origPath)
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	// Temporarily replace Cmd with a nop command encoding this step so the
	// cache probe keys on it; the defer restores the user's Cmd.
	cmd := b.Config.Cmd
	if runtime.GOOS != "windows" {
		b.Config.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	} else {
		b.Config.Cmd = stringutils.NewStrSlice("cmd", "/S /C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
	}
	defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)

	hit, err := b.probeCache(ctx)
	if err != nil {
		return err
	}

	if hit {
		return nil
	}

	container, _, err := b.Daemon.ContainerCreate(ctx, "", b.Config, nil, true)
	if err != nil {
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(ctx); err != nil {
		return err
	}
	defer container.Unmount(ctx)

	for _, ci := range copyInfos {
		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
			return err
		}
	}

	// Commit with the original cmd (restored by the defer above for the
	// caller, but passed explicitly here as the committed Cmd).
	if err := b.commit(ctx, container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
		return err
	}
	return nil
}
|
|
|
|
|
2015-07-22 05:29:03 +00:00
|
|
|
func calcCopyInfo(b *builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {
|
2014-09-22 13:41:02 +00:00
|
|
|
|
2015-07-12 16:05:10 +00:00
|
|
|
// Work in daemon-specific OS filepath semantics. However, we save
|
|
|
|
// the the origPath passed in here, as it might also be a URL which
|
|
|
|
// we need to check for in this function.
|
|
|
|
passedInOrigPath := origPath
|
|
|
|
origPath = filepath.FromSlash(origPath)
|
|
|
|
destPath = filepath.FromSlash(destPath)
|
|
|
|
|
|
|
|
if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
|
2014-09-22 13:41:02 +00:00
|
|
|
origPath = origPath[1:]
|
|
|
|
}
|
2015-07-12 16:05:10 +00:00
|
|
|
origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))
|
2014-09-22 13:41:02 +00:00
|
|
|
|
2014-12-12 18:32:11 +00:00
|
|
|
// Twiddle the destPath when its a relative path - meaning, make it
|
|
|
|
// relative to the WORKINGDIR
|
2015-08-26 23:39:16 +00:00
|
|
|
if !system.IsAbs(destPath) {
|
2015-07-12 16:05:10 +00:00
|
|
|
hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator))
|
|
|
|
destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.Config.WorkingDir), destPath)
|
2014-12-12 18:32:11 +00:00
|
|
|
|
|
|
|
// Make sure we preserve any trailing slash
|
|
|
|
if hasSlash {
|
2015-07-12 16:05:10 +00:00
|
|
|
destPath += string(os.PathSeparator)
|
2014-12-12 18:32:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-22 13:41:02 +00:00
|
|
|
// In the remote/URL case, download it and gen its hashcode
|
2015-07-12 16:05:10 +00:00
|
|
|
if urlutil.IsURL(passedInOrigPath) {
|
|
|
|
|
|
|
|
// As it's a URL, we go back to processing on what was passed in
|
|
|
|
// to this function
|
|
|
|
origPath = passedInOrigPath
|
|
|
|
|
2014-09-22 13:41:02 +00:00
|
|
|
if !allowRemote {
|
|
|
|
return fmt.Errorf("Source can't be a URL for %s", cmdName)
|
|
|
|
}
|
2014-08-05 20:17:40 +00:00
|
|
|
|
2014-09-22 13:41:02 +00:00
|
|
|
ci := copyInfo{}
|
|
|
|
ci.origPath = origPath
|
|
|
|
ci.hash = origPath // default to this but can change
|
|
|
|
ci.destPath = destPath
|
|
|
|
ci.decompress = false
|
|
|
|
*cInfos = append(*cInfos, &ci)
|
2014-09-16 16:58:20 +00:00
|
|
|
|
2014-08-05 20:17:40 +00:00
|
|
|
// Initiate the download
|
2015-03-29 21:17:23 +00:00
|
|
|
resp, err := httputils.Download(ci.origPath)
|
2014-08-05 20:17:40 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a tmp dir
|
2015-08-24 21:07:22 +00:00
|
|
|
tmpDirName, err := getTempDir(b.contextPath, "docker-remote")
|
2014-08-05 20:17:40 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2014-09-16 16:58:20 +00:00
|
|
|
ci.tmpDir = tmpDirName
|
2014-08-05 20:17:40 +00:00
|
|
|
|
|
|
|
// Create a tmp file within our tmp dir
|
2015-05-07 15:39:42 +00:00
|
|
|
tmpFileName := filepath.Join(tmpDirName, "tmp")
|
2014-08-05 20:17:40 +00:00
|
|
|
tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Download and dump result to tmp file
|
2015-02-24 08:51:46 +00:00
|
|
|
if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
|
|
|
|
In: resp.Body,
|
|
|
|
Out: b.OutOld,
|
|
|
|
Formatter: b.StreamFormatter,
|
2015-07-23 21:19:58 +00:00
|
|
|
Size: resp.ContentLength,
|
2015-02-24 08:51:46 +00:00
|
|
|
NewLines: true,
|
|
|
|
ID: "",
|
|
|
|
Action: "Downloading",
|
|
|
|
})); err != nil {
|
2014-08-05 20:17:40 +00:00
|
|
|
tmpFile.Close()
|
|
|
|
return err
|
|
|
|
}
|
2014-09-03 00:17:08 +00:00
|
|
|
fmt.Fprintf(b.OutStream, "\n")
|
2014-08-05 20:17:40 +00:00
|
|
|
tmpFile.Close()
|
|
|
|
|
2014-10-22 18:16:42 +00:00
|
|
|
// Set the mtime to the Last-Modified header value if present
|
|
|
|
// Otherwise just remove atime and mtime
|
|
|
|
times := make([]syscall.Timespec, 2)
|
|
|
|
|
|
|
|
lastMod := resp.Header.Get("Last-Modified")
|
|
|
|
if lastMod != "" {
|
|
|
|
mTime, err := http.ParseTime(lastMod)
|
|
|
|
// If we can't parse it then just let it default to 'zero'
|
|
|
|
// otherwise use the parsed time value
|
|
|
|
if err == nil {
|
|
|
|
times[1] = syscall.NsecToTimespec(mTime.UnixNano())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-02 20:53:09 +00:00
|
|
|
// Windows does not support UtimesNano.
|
|
|
|
if runtime.GOOS != "windows" {
|
|
|
|
if err := system.UtimesNano(tmpFileName, times); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2014-08-05 20:17:40 +00:00
|
|
|
}
|
|
|
|
|
2015-05-07 15:39:42 +00:00
|
|
|
ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
|
2014-08-05 20:17:40 +00:00
|
|
|
|
|
|
|
// If the destination is a directory, figure out the filename.
|
2015-07-12 16:05:10 +00:00
|
|
|
if strings.HasSuffix(ci.destPath, string(os.PathSeparator)) {
|
2014-09-22 13:41:02 +00:00
|
|
|
u, err := url.Parse(origPath)
|
2014-08-05 20:17:40 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-09-02 23:24:53 +00:00
|
|
|
path := filepath.FromSlash(u.Path) // Ensure in platform semantics
|
2015-07-12 16:05:10 +00:00
|
|
|
if strings.HasSuffix(path, string(os.PathSeparator)) {
|
2014-08-05 20:17:40 +00:00
|
|
|
path = path[:len(path)-1]
|
|
|
|
}
|
2015-07-12 16:05:10 +00:00
|
|
|
parts := strings.Split(path, string(os.PathSeparator))
|
2014-08-05 20:17:40 +00:00
|
|
|
filename := parts[len(parts)-1]
|
|
|
|
if filename == "" {
|
|
|
|
return fmt.Errorf("cannot determine filename from url: %s", u)
|
|
|
|
}
|
2014-09-16 16:58:20 +00:00
|
|
|
ci.destPath = ci.destPath + filename
|
2014-08-05 20:17:40 +00:00
|
|
|
}
|
|
|
|
|
2015-01-08 14:56:30 +00:00
|
|
|
// Calc the checksum, even if we're using the cache
|
|
|
|
r, err := archive.Tar(tmpFileName, archive.Uncompressed)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2014-09-22 13:41:02 +00:00
|
|
|
}
|
2015-04-02 17:42:40 +00:00
|
|
|
tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
|
2015-01-08 14:56:30 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
ci.hash = tarSum.Sum(nil)
|
|
|
|
r.Close()
|
2014-09-22 13:41:02 +00:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Deal with wildcards
|
2015-07-22 05:29:03 +00:00
|
|
|
if allowWildcards && containsWildcards(origPath) {
|
2014-09-22 13:41:02 +00:00
|
|
|
for _, fileInfo := range b.context.GetSums() {
|
|
|
|
if fileInfo.Name() == "" {
|
|
|
|
continue
|
2014-08-05 20:17:40 +00:00
|
|
|
}
|
2015-05-07 15:39:42 +00:00
|
|
|
match, _ := filepath.Match(origPath, fileInfo.Name())
|
2014-09-22 13:41:02 +00:00
|
|
|
if !match {
|
|
|
|
continue
|
2014-08-05 20:17:40 +00:00
|
|
|
}
|
2014-09-22 13:41:02 +00:00
|
|
|
|
2015-04-10 19:39:42 +00:00
|
|
|
// Note we set allowWildcards to false in case the name has
|
|
|
|
// a * in it
|
|
|
|
calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false)
|
2014-09-22 13:41:02 +00:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Must be a dir or a file
|
|
|
|
|
|
|
|
if err := b.checkPathForAddition(origPath); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-05-07 15:39:42 +00:00
|
|
|
fi, _ := os.Stat(filepath.Join(b.contextPath, origPath))
|
2014-09-22 13:41:02 +00:00
|
|
|
|
|
|
|
ci := copyInfo{}
|
|
|
|
ci.origPath = origPath
|
|
|
|
ci.hash = origPath
|
|
|
|
ci.destPath = destPath
|
|
|
|
ci.decompress = allowDecompression
|
|
|
|
*cInfos = append(*cInfos, &ci)
|
|
|
|
|
|
|
|
// Deal with the single file case
|
|
|
|
if !fi.IsDir() {
|
|
|
|
// This will match first file in sums of the archive
|
|
|
|
fis := b.context.GetSums().GetFile(ci.origPath)
|
|
|
|
if fis != nil {
|
|
|
|
ci.hash = "file:" + fis.Sum()
|
2014-08-05 20:17:40 +00:00
|
|
|
}
|
2014-09-22 13:41:02 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Must be a dir
|
|
|
|
var subfiles []string
|
2015-05-07 15:39:42 +00:00
|
|
|
absOrigPath := filepath.Join(b.contextPath, ci.origPath)
|
2014-08-05 20:17:40 +00:00
|
|
|
|
2014-09-22 13:41:02 +00:00
|
|
|
// Add a trailing / to make sure we only pick up nested files under
|
|
|
|
// the dir and not sibling files of the dir that just happen to
|
|
|
|
// start with the same chars
|
2015-07-12 16:05:10 +00:00
|
|
|
if !strings.HasSuffix(absOrigPath, string(os.PathSeparator)) {
|
|
|
|
absOrigPath += string(os.PathSeparator)
|
2014-08-05 20:17:40 +00:00
|
|
|
}
|
|
|
|
|
2015-07-12 16:05:10 +00:00
|
|
|
// Need path w/o slash too to find matching dir w/o trailing slash
|
2014-09-22 13:41:02 +00:00
|
|
|
absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]
|
|
|
|
|
|
|
|
for _, fileInfo := range b.context.GetSums() {
|
2015-05-07 15:39:42 +00:00
|
|
|
absFile := filepath.Join(b.contextPath, fileInfo.Name())
|
2014-10-23 21:30:11 +00:00
|
|
|
// Any file in the context that starts with the given path will be
|
|
|
|
// picked up and its hashcode used. However, we'll exclude the
|
|
|
|
// root dir itself. We do this for a coupel of reasons:
|
|
|
|
// 1 - ADD/COPY will not copy the dir itself, just its children
|
|
|
|
// so there's no reason to include it in the hash calc
|
|
|
|
// 2 - the metadata on the dir will change when any child file
|
|
|
|
// changes. This will lead to a miss in the cache check if that
|
|
|
|
// child file is in the .dockerignore list.
|
|
|
|
if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
|
2014-09-22 13:41:02 +00:00
|
|
|
subfiles = append(subfiles, fileInfo.Sum())
|
|
|
|
}
|
2014-08-05 20:17:40 +00:00
|
|
|
}
|
2014-09-22 13:41:02 +00:00
|
|
|
sort.Strings(subfiles)
|
|
|
|
hasher := sha256.New()
|
|
|
|
hasher.Write([]byte(strings.Join(subfiles, ",")))
|
|
|
|
ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
|
|
|
|
|
2014-08-05 20:17:40 +00:00
|
|
|
return nil
|
|
|
|
}
|
2014-08-05 22:41:09 +00:00
|
|
|
|
2015-07-22 05:29:03 +00:00
|
|
|
// containsWildcards reports whether name holds an unescaped glob
// metacharacter ('*', '?', or '['). A backslash escapes the byte that
// follows it, so escaped metacharacters do not count.
func containsWildcards(name string) bool {
	for i := 0; i < len(name); i++ {
		switch name[i] {
		case '\\':
			// Skip the escaped byte.
			i++
		case '*', '?', '[':
			return true
		}
	}
	return false
}
|
|
|
|
|
2015-09-10 22:01:18 +00:00
|
|
|
// pullImage pulls the named image through the daemon's registry machinery
// and returns the resulting image. If name carries no tag, "latest" is
// assumed. Credentials come from b.AuthConfigs when present, resolved
// against the image's registry index.
func (b *builder) pullImage(ctx context.Context, name string) (*image.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}

	pullRegistryAuth := &cliconfig.AuthConfig{}
	if len(b.AuthConfigs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote)
		if err != nil {
			return nil, err
		}

		resolvedConfig := registry.ResolveAuthConfig(
			&cliconfig.ConfigFile{AuthConfigs: b.AuthConfigs},
			repoInfo.Index,
		)
		pullRegistryAuth = &resolvedConfig
	}

	imagePullConfig := &graph.ImagePullConfig{
		AuthConfig: pullRegistryAuth,
		OutStream:  ioutils.NopWriteCloser(b.OutOld),
	}

	if err := b.Daemon.Repositories(ctx).Pull(ctx, remote, tag, imagePullConfig); err != nil {
		return nil, err
	}

	// Look the image up by the original name (including any tag) now that
	// the pull has populated the local repositories.
	image, err := b.Daemon.Repositories(ctx).LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}
|
|
|
|
|
2015-09-10 22:01:18 +00:00
|
|
|
// processImageFrom makes img the builder's base image: it adopts the
// image's config, seeds a default PATH when the image defines no
// environment, and then replays the image's ONBUILD triggers by parsing
// and dispatching each one as a Dockerfile step. ONBUILD may not chain
// itself, and MAINTAINER/FROM are rejected as triggers.
func (b *builder) processImageFrom(ctx context.Context, img *image.Image) error {
	b.image = img.ID

	if img.Config != nil {
		b.Config = img.Config
	}

	// The default path will be blank on Windows (set by HCS)
	if len(b.Config.Env) == 0 && daemon.DefaultPathEnv != "" {
		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
		word := "trigger"
		if nTriggers > 1 {
			word = "triggers"
		}
		fmt.Fprintf(b.ErrStream, "# Executing %d build %s...\n", nTriggers, word)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.Config.OnBuild
	b.Config.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for _, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			// Reject instructions that make no sense (or are forbidden)
			// as ONBUILD triggers before dispatching anything.
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			if err := b.dispatch(ctx, i, n); err != nil {
				return err
			}
		}
	}

	return nil
}
|
|
|
|
|
2014-08-26 19:25:44 +00:00
|
|
|
// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
|
2014-08-11 15:44:31 +00:00
|
|
|
// and if so attempts to look up the current `b.image` and `b.Config` pair
|
2014-08-26 19:25:44 +00:00
|
|
|
// in the current server `b.Daemon`. If an image is found, probeCache returns
|
2014-08-05 22:41:09 +00:00
|
|
|
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
|
|
|
|
// is any error, it returns `(false, err)`.
|
2015-09-10 22:01:18 +00:00
|
|
|
func (b *builder) probeCache(ctx context.Context) (bool, error) {
|
2015-02-25 18:27:32 +00:00
|
|
|
if !b.UtilizeCache || b.cacheBusted {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
2015-09-10 22:01:18 +00:00
|
|
|
cache, err := b.Daemon.ImageGetCached(ctx, b.image, b.Config)
|
2015-02-25 18:27:32 +00:00
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
if cache == nil {
|
2015-03-26 22:22:04 +00:00
|
|
|
logrus.Debugf("[BUILDER] Cache miss")
|
2015-02-25 18:27:32 +00:00
|
|
|
b.cacheBusted = true
|
|
|
|
return false, nil
|
2014-08-05 22:41:09 +00:00
|
|
|
}
|
2015-02-25 18:27:32 +00:00
|
|
|
|
|
|
|
fmt.Fprintf(b.OutStream, " ---> Using cache\n")
|
2015-03-26 22:22:04 +00:00
|
|
|
logrus.Debugf("[BUILDER] Use cached version")
|
2015-02-25 18:27:32 +00:00
|
|
|
b.image = cache.ID
|
2015-09-10 22:01:18 +00:00
|
|
|
b.Daemon.Graph(ctx).Retain(b.id, cache.ID)
|
2015-04-08 02:29:29 +00:00
|
|
|
b.activeImages = append(b.activeImages, cache.ID)
|
2015-02-25 18:27:32 +00:00
|
|
|
return true, nil
|
2014-08-05 22:41:09 +00:00
|
|
|
}
|
|
|
|
|
2015-09-10 22:01:18 +00:00
|
|
|
// create makes a temporary container from the current b.Config (plus the
// builder's resource-limit host config), registers it in b.TmpContainers,
// and returns it. When the config carries a Cmd, the container's Path/Args
// are overridden with it (replacing anything inherited from the base
// image's entrypoint).
func (b *builder) create(ctx context.Context) (*daemon.Container, error) {
	if b.image == "" && !b.noBaseImage {
		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.Config.Image = b.image

	// Resource limits requested on the build command line.
	hostConfig := &runconfig.HostConfig{
		CPUShares:    b.cpuShares,
		CPUPeriod:    b.cpuPeriod,
		CPUQuota:     b.cpuQuota,
		CpusetCpus:   b.cpuSetCpus,
		CpusetMems:   b.cpuSetMems,
		CgroupParent: b.cgroupParent,
		Memory:       b.memory,
		MemorySwap:   b.memorySwap,
		Ulimits:      b.ulimits,
	}

	config := *b.Config

	// Create the container
	c, warnings, err := b.Daemon.ContainerCreate(ctx, "", b.Config, hostConfig, true)
	if err != nil {
		return nil, err
	}
	for _, warning := range warnings {
		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
	}

	b.TmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID))

	if config.Cmd.Len() > 0 {
		// override the entry point that may have been picked up from the base image
		s := config.Cmd.Slice()
		c.Path = s[0]
		c.Args = s[1:]
	} else {
		config.Cmd = stringutils.NewStrSlice()
	}

	return c, nil
}
|
|
|
|
|
2015-09-10 22:01:18 +00:00
|
|
|
// run starts container c, streams its output when b.Verbose is set, kills
// it if the build is cancelled, and waits for it to exit. A non-zero exit
// code is surfaced as a *jsonmessage.JSONError carrying the code.
func (b *builder) run(ctx context.Context, c *daemon.Container) error {
	var errCh chan error
	if b.Verbose {
		// Attach before Start so no output is lost.
		errCh = c.Attach(nil, b.OutStream, b.ErrStream)
	}

	//start the container
	if err := c.Start(ctx); err != nil {
		return err
	}

	// Watch for build cancellation while the container runs; the deferred
	// close(finished) lets the goroutine exit on the normal path.
	finished := make(chan struct{})
	defer close(finished)
	go func() {
		select {
		case <-b.cancelled:
			logrus.Debugln("Build cancelled, killing container:", c.ID)
			c.Kill(ctx)
		case <-finished:
		}
	}()

	if b.Verbose {
		// Block on reading output from container, stop on err or chan closed
		if err := <-errCh; err != nil {
			return err
		}
	}

	// Wait for it to finish
	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
		return &jsonmessage.JSONError{
			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.Config.Cmd.ToString(), ret),
			Code:    ret,
		}
	}

	return nil
}
|
|
|
|
|
2015-07-22 05:29:03 +00:00
|
|
|
func (b *builder) checkPathForAddition(orig string) error {
|
2015-05-07 15:39:42 +00:00
|
|
|
origPath := filepath.Join(b.contextPath, orig)
|
2015-08-24 21:07:22 +00:00
|
|
|
origPath, err := symlink.EvalSymlinks(origPath)
|
2014-08-13 10:07:41 +00:00
|
|
|
if err != nil {
|
2014-08-05 22:41:09 +00:00
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return fmt.Errorf("%s: no such file or directory", orig)
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
2015-08-24 21:07:22 +00:00
|
|
|
contextPath, err := symlink.EvalSymlinks(b.contextPath)
|
2015-05-07 15:39:42 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if !strings.HasPrefix(origPath, contextPath) {
|
2014-08-05 22:41:09 +00:00
|
|
|
return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
|
|
|
|
}
|
2014-08-13 10:07:41 +00:00
|
|
|
if _, err := os.Stat(origPath); err != nil {
|
2014-08-05 22:41:09 +00:00
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return fmt.Errorf("%s: no such file or directory", orig)
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-07-22 05:29:03 +00:00
|
|
|
// addContext copies orig (a path relative to b.contextPath) into the
// container's filesystem at dest. When decompress is true and orig is a
// recognized archive, it is unpacked at dest instead of copied verbatim.
// Ownership of the copied result is reset via fixPermissions (uid/gid 0).
func (b *builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = filepath.Join(b.contextPath, orig)
		destPath   string
	)

	// Work in daemon-local OS specific file paths
	dest = filepath.FromSlash(dest)

	// Resolve dest inside the container's root filesystem.
	destPath, err = container.GetResourcePath(dest)
	if err != nil {
		return err
	}

	// Preserve the trailing slash: a slash-terminated (or ".") dest means
	// "copy into this directory" rather than "copy to this exact path".
	if strings.HasSuffix(dest, string(os.PathSeparator)) || dest == "." {
		destPath = destPath + string(os.PathSeparator)
	}

	// A stat failure other than "not exist" is fatal; "not exist" just
	// records that the destination must be created.
	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err)
			return err
		}
		destExists = false
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}

	// Directories are copied recursively as a tree.
	if fi.IsDir() {
		return copyAsDirectory(origPath, destPath, destExists)
	}

	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive
		// to support the untar feature we need to clean up the path a little bit
		// because tar is very forgiving. First we need to strip off the archive's
		// filename from the path but this is only added if it does not end in slash
		tarDest := destPath
		if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig; on any failure other than EOF
		// fall through to a plain file copy below.
		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}

	// Plain file copy: ensure the parent directory exists, then copy with
	// tar semantics (preserves metadata) inside the container root.
	if err := system.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
		return err
	}

	// If the destination was an existing directory, the file actually landed
	// inside it; fix permissions on that inner path, not the directory.
	resPath := destPath
	if destExists && destStat.IsDir() {
		resPath = filepath.Join(destPath, filepath.Base(origPath))
	}

	return fixPermissions(origPath, resPath, 0, 0, destExists)
}
|
|
|
|
|
2014-12-11 18:40:16 +00:00
|
|
|
func copyAsDirectory(source, destination string, destExisted bool) error {
|
2014-10-29 19:06:51 +00:00
|
|
|
if err := chrootarchive.CopyWithTar(source, destination); err != nil {
|
2014-08-05 22:41:09 +00:00
|
|
|
return err
|
|
|
|
}
|
2014-12-11 18:40:16 +00:00
|
|
|
return fixPermissions(source, destination, 0, 0, destExisted)
|
2014-08-05 22:41:09 +00:00
|
|
|
}
|
|
|
|
|
2015-09-10 22:01:18 +00:00
|
|
|
func (b *builder) clearTmp(ctx context.Context) {
|
2014-08-19 11:14:21 +00:00
|
|
|
for c := range b.TmpContainers {
|
2015-05-26 19:38:52 +00:00
|
|
|
rmConfig := &daemon.ContainerRmConfig{
|
|
|
|
ForceRemove: true,
|
|
|
|
RemoveVolume: true,
|
2014-12-16 23:06:35 +00:00
|
|
|
}
|
2015-09-10 22:01:18 +00:00
|
|
|
if err := b.Daemon.ContainerRm(ctx, c, rmConfig); err != nil {
|
2015-03-24 11:25:26 +00:00
|
|
|
fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
|
2014-08-28 14:18:08 +00:00
|
|
|
return
|
2014-08-05 22:41:09 +00:00
|
|
|
}
|
2014-08-28 14:18:08 +00:00
|
|
|
delete(b.TmpContainers, c)
|
2015-03-24 11:25:26 +00:00
|
|
|
fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
|
2014-08-05 22:41:09 +00:00
|
|
|
}
|
|
|
|
}
|