package dockerfile

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/remotecontext"
	"github.com/docker/docker/pkg/httputils"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/pkg/errors"
)
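
// commit creates a temporary container from the current runConfig with a
// "#(nop)" comment command, probing the build cache first, and commits the
// container to produce the next image in the build.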
func (b *Builder) commit(dispatchState *dispatchState, comment string) error {
	if b.disableCommit {
		return nil
	}
	if !dispatchState.hasFromImage() {
		return errors.New("Please provide a source image with `from` prior to commit")
	}

	runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment))
	hit, err := b.probeCache(dispatchState, runConfigWithCommentCmd)
	if err != nil || hit {
		return err
	}
	id, err := b.create(runConfigWithCommentCmd)
	if err != nil {
		return err
	}

	return b.commitContainer(dispatchState, id, runConfigWithCommentCmd)
}
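
// commitContainer commits the given container using the dispatch state's
// runConfig, records the resulting image ID on the dispatch state, and
// updates the current build stage.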
// TODO: see if any args can be dropped
func (b *Builder) commitContainer(dispatchState *dispatchState, id string, containerConfig *container.Config) error {
	if b.disableCommit {
		return nil
	}

	commitCfg := &backend.ContainerCommitConfig{
		ContainerCommitConfig: types.ContainerCommitConfig{
			Author: dispatchState.maintainer,
			Pause:  true,
			// TODO: this should be done by Commit()
			Config: copyRunConfig(dispatchState.runConfig),
		},
		ContainerConfig: containerConfig,
	}

	// Commit the container
	imageID, err := b.docker.Commit(id, commitCfg)
	if err != nil {
		return err
	}

	dispatchState.imageID = imageID
	b.buildStages.update(imageID, dispatchState.runConfig)
	return nil
}
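
// copyInfo describes a single source for ADD/COPY: the context root it was
// resolved against, its path relative to that root, the content hash used
// for build-cache lookups, and whether it should be decompressed on copy.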
type copyInfo struct {
	root       string
	path       string
	hash       string
	decompress bool
}
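
// runContextCommand implements the shared logic behind ADD and COPY: it
// resolves every source (build context, another image, or a remote URL when
// allowRemote is set), hashes the sources for cache probing, copies them
// into a temporary container, and commits that container.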
// TODO: this needs to be split so that a Builder method doesn't accept req
func (b *Builder) runContextCommand(req dispatchRequest, allowRemote bool, allowLocalDecompression bool, cmdName string, imageSource *imageMount) error {
	args := req.args
	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	var infos []copyInfo

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	var err error
	for _, orig := range args[0 : len(args)-1] {
		if urlutil.IsURL(orig) {
			if !allowRemote {
				return fmt.Errorf("Source can't be a URL for %s", cmdName)
			}
			remote, path, err := b.download(orig)
			if err != nil {
				return err
			}
			defer os.RemoveAll(remote.Root())
			h, err := remote.Hash(path)
			if err != nil {
				return err
			}
			infos = append(infos, copyInfo{
				root: remote.Root(),
				path: path,
				hash: h,
			})
			continue
		}
		// not a URL
		subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true, imageSource)
		if err != nil {
			return err
		}
		infos = append(infos, subInfos...)
	}

	if len(infos) == 0 {
		return errors.New("No source files were specified")
	}
	if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one info then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	if len(infos) == 1 {
		info := infos[0]
		srcHash = info.hash
	} else {
		var hashs []string
		var origs []string
		for _, info := range infos {
			origs = append(origs, info.path)
			hashs = append(hashs, info.hash)
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
	}

	// TODO: should this have been using origPaths instead of srcHash in the comment?
	runConfigWithCommentCmd := copyRunConfig(
		req.state.runConfig,
		withCmdCommentString(fmt.Sprintf("%s %s in %s ", cmdName, srcHash, dest)))
	if hit, err := b.probeCache(req.state, runConfigWithCommentCmd); err != nil || hit {
		return err
	}

	container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{
		Config: runConfigWithCommentCmd,
		// Set a log config to override any default value set on the daemon
		HostConfig: &container.HostConfig{LogConfig: defaultLogConfig},
	})
	if err != nil {
		return err
	}
	b.tmpContainers[container.ID] = struct{}{}

	// Twiddle the destination when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if dest, err = normaliseDest(cmdName, req.state.runConfig.WorkingDir, dest); err != nil {
		return err
	}

	for _, info := range infos {
		if err := b.docker.CopyOnBuild(container.ID, dest, info.root, info.path, info.decompress); err != nil {
			return err
		}
	}

	return b.commitContainer(req.state, container.ID, runConfigWithCommentCmd)
}
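
// runConfigModifier mutates a container.Config in place. Modifiers are
// applied to a copy of the run config by copyRunConfig.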
type runConfigModifier func(*container.Config)
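
// copyRunConfig returns a shallow copy of runConfig with all modifiers
// applied, leaving the original untouched.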
func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config {
	copy := *runConfig
	for _, modifier := range modifiers {
		modifier(&copy)
	}
	return &copy
}
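
// withCmd replaces Cmd on the copied run config.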
func withCmd(cmd []string) runConfigModifier {
	return func(runConfig *container.Config) {
		runConfig.Cmd = cmd
	}
}

// withCmdComment sets Cmd to a nop comment string. See withCmdCommentString for
// why there are two almost identical versions of this.
func withCmdComment(comment string) runConfigModifier {
	return func(runConfig *container.Config) {
		runConfig.Cmd = append(getShell(runConfig), "#(nop) ", comment)
	}
}

// withCmdCommentString exists to maintain compatibility with older versions.
// A few instructions (workdir, copy, add) used a nop comment that is a single arg
// whereas all the other instructions used a two arg comment string. This
// function implements the single arg version.
func withCmdCommentString(comment string) runConfigModifier {
	return func(runConfig *container.Config) {
		runConfig.Cmd = append(getShell(runConfig), "#(nop) "+comment)
	}
}
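
// withEnv replaces the environment variables on the copied run config.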
func withEnv(env []string) runConfigModifier {
	return func(runConfig *container.Config) {
		runConfig.Env = env
	}
}

// withEntrypointOverride sets an entrypoint on runConfig if the command is
// not empty. The entrypoint is left unmodified if command is empty.
//
// The dockerfile RUN instruction expects to run without an entrypoint
// so the runConfig entrypoint needs to be modified accordingly. ContainerCreate
// will change a []string{""} entrypoint to nil, so we probe the cache with the
// nil entrypoint.
func withEntrypointOverride(cmd []string, entrypoint []string) runConfigModifier {
	return func(runConfig *container.Config) {
		if len(cmd) > 0 {
			runConfig.Entrypoint = entrypoint
		}
	}
}

// getShell is a helper function which gets the right shell for prefixing the
// shell-form of RUN, ENTRYPOINT and CMD instructions
func getShell(c *container.Config) []string {
	if 0 == len(c.Shell) {
		return append([]string{}, defaultShell[:]...)
	}
	return append([]string{}, c.Shell[:]...)
}
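
// download fetches srcURL into a temporary directory, reporting progress on
// the builder's output and preserving the Last-Modified time when present.
// It returns the directory as a lazy build context together with the
// filename derived from the URL path.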
func (b *Builder) download(srcURL string) (remote builder.Source, p string, err error) {
	// get filename from URL
	u, err := url.Parse(srcURL)
	if err != nil {
		return
	}
	path := filepath.FromSlash(u.Path) // Ensure in platform semantics
	if strings.HasSuffix(path, string(os.PathSeparator)) {
		path = path[:len(path)-1]
	}
	parts := strings.Split(path, string(os.PathSeparator))
	filename := parts[len(parts)-1]
	if filename == "" {
		err = fmt.Errorf("cannot determine filename from url: %s", u)
		return
	}

	// Initiate the download
	resp, err := httputils.Download(srcURL)
	if err != nil {
		return
	}

	// Prepare file in a tmp dir
	tmpDir, err := ioutils.TempDir("", "docker-remote")
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			os.RemoveAll(tmpDir)
		}
	}()
	tmpFileName := filepath.Join(tmpDir, filename)
	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		return
	}

	progressOutput := streamformatter.NewJSONProgressOutput(b.Output, true)
	progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")
	// Download and dump result to tmp file
	// TODO: add filehash directly
	if _, err = io.Copy(tmpFile, progressReader); err != nil {
		tmpFile.Close()
		return
	}
	fmt.Fprintln(b.Stdout)

	// Set the mtime to the Last-Modified header value if present
	// Otherwise just remove atime and mtime
	mTime := time.Time{}

	lastMod := resp.Header.Get("Last-Modified")
	if lastMod != "" {
		// If we can't parse it then just let it default to 'zero'
		// otherwise use the parsed time value
		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
			mTime = parsedMTime
		}
	}

	tmpFile.Close()

	if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
		return
	}

	lc, err := remotecontext.NewLazyContext(tmpDir)
	if err != nil {
		return
	}
	return lc, filename, nil
}
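
// windowsBlacklist lists Windows paths that must not be used as a copy
// source when copying from another image.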
var windowsBlacklist = map[string]bool{
	"c:\\":        true,
	"c:\\windows": true,
}
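
// calcCopyInfo resolves origPath within the build context (or the image
// referenced by imageSource), expanding wildcards when allowed, and returns
// one copyInfo per matched source with the content hash used for
// build-cache lookups.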
func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool, imageSource *imageMount) ([]copyInfo, error) {
	// Work in daemon-specific OS filepath semantics
	origPath = filepath.FromSlash(origPath)

	// validate windows paths from other images
	if imageSource != nil && runtime.GOOS == "windows" {
		p := strings.ToLower(filepath.Clean(origPath))
		if !filepath.IsAbs(p) {
			if filepath.VolumeName(p) != "" {
				if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths
					p = p[:len(p)-1]
				}
				p += "\\"
			} else {
				p = filepath.Join("c:\\", p)
			}
		}
		if _, blacklisted := windowsBlacklist[p]; blacklisted {
			return nil, errors.New("copy from c:\\ or c:\\windows is not allowed on windows")
		}
	}

	if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	source := b.source
	var err error
	if imageSource != nil {
		source, err = imageSource.Source()
		if err != nil {
			return nil, errors.Wrapf(err, "failed to copy")
		}
	}

	if source == nil {
		return nil, errors.Errorf("No context given. Impossible to use %s", cmdName)
	}

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath) {
		var copyInfos []copyInfo
		if err := filepath.Walk(source.Root(), func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			rel, err := remotecontext.Rel(source.Root(), path)
			if err != nil {
				return err
			}

			if rel == "." {
				return nil
			}
			if match, _ := filepath.Match(origPath, rel); !match {
				return nil
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			subInfos, err := b.calcCopyInfo(cmdName, rel, allowLocalDecompression, false, imageSource)
			if err != nil {
				return err
			}
			copyInfos = append(copyInfos, subInfos...)
			return nil
		}); err != nil {
			return nil, err
		}
		return copyInfos, nil
	}

	// Must be a dir or a file
	hash, err := source.Hash(origPath)
	if err != nil {
		return nil, err
	}

	fi, err := remotecontext.StatAt(source, origPath)
	if err != nil {
		return nil, err
	}

	// TODO: remove, handle dirs in Hash()
	copyInfos := []copyInfo{{root: source.Root(), path: origPath, hash: hash, decompress: allowLocalDecompression}}

	if imageSource != nil {
		// fast-cache based on imageID
		if h, ok := b.imageSources.getCache(imageSource.Image().ImageID(), origPath); ok {
			copyInfos[0].hash = h.(string)
			return copyInfos, nil
		}
	}

	// Deal with the single file case
	if !fi.IsDir() {
		copyInfos[0].hash = "file:" + copyInfos[0].hash
		return copyInfos, nil
	}

	fp, err := remotecontext.FullPath(source, origPath)
	if err != nil {
		return nil, err
	}

	// Must be a dir
	var subfiles []string
	err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := remotecontext.Rel(source.Root(), path)
		if err != nil {
			return err
		}
		if rel == "." {
			return nil
		}
		hash, err := source.Hash(rel)
		if err != nil {
			return nil
		}
		// we already checked handleHash above
		subfiles = append(subfiles, hash)
		return nil
	})
	if err != nil {
		return nil, err
	}

	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	copyInfos[0].hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	if imageSource != nil {
		b.imageSources.setCache(imageSource.Image().ImageID(), origPath, copyInfos[0].hash)
	}

	return copyInfos, nil
}

// probeCache checks if cache match can be found for current build instruction.
// If an image is found, probeCache returns `(true, nil)`.
// If no image is found, it returns `(false, nil)`.
// If there is any error, it returns `(false, err)`.
func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) {
	c := b.imageCache
	if c == nil || b.options.NoCache || b.cacheBusted {
		return false, nil
	}
	cache, err := c.GetCache(dispatchState.imageID, runConfig)
	if err != nil {
		return false, err
	}
	if len(cache) == 0 {
		logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd)
		b.cacheBusted = true
		return false, nil
	}

	fmt.Fprint(b.Stdout, " ---> Using cache\n")
	logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd)
	dispatchState.imageID = string(cache)
	b.buildStages.update(dispatchState.imageID, runConfig)
	return true, nil
}
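
// create creates a temporary build container from runConfig and the
// builder's resource and host options, registers it for cleanup, and
// returns its ID.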
func (b *Builder) create(runConfig *container.Config) (string, error) {
	resources := container.Resources{
		CgroupParent: b.options.CgroupParent,
		CPUShares:    b.options.CPUShares,
		CPUPeriod:    b.options.CPUPeriod,
		CPUQuota:     b.options.CPUQuota,
		CpusetCpus:   b.options.CPUSetCPUs,
		CpusetMems:   b.options.CPUSetMems,
		Memory:       b.options.Memory,
		MemorySwap:   b.options.MemorySwap,
		Ulimits:      b.options.Ulimits,
	}

	// TODO: why not embed a hostconfig in builder?
	hostConfig := &container.HostConfig{
		SecurityOpt: b.options.SecurityOpt,
		Isolation:   b.options.Isolation,
		ShmSize:     b.options.ShmSize,
		Resources:   resources,
		NetworkMode: container.NetworkMode(b.options.NetworkMode),
		// Set a log config to override any default value set on the daemon
		LogConfig:  defaultLogConfig,
		ExtraHosts: b.options.ExtraHosts,
	}

	// Create the container
	c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{
		Config:     runConfig,
		HostConfig: hostConfig,
	})
	if err != nil {
		return "", err
	}
	for _, warning := range c.Warnings {
		fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning)
	}

	b.tmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID))
	return c.ID, nil
}

var errCancelled = errors.New("build cancelled")
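
// run attaches to the container, starts it, and blocks until it exits,
// honouring build cancellation from the client context. A non-zero exit
// code is reported as a jsonmessage.JSONError.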
func (b *Builder) run(cID string, cmd []string) (err error) {
	attached := make(chan struct{})
	errCh := make(chan error)
	go func() {
		errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true, attached)
	}()

	select {
	case err := <-errCh:
		return err
	case <-attached:
	}

	finished := make(chan struct{})
	cancelErrCh := make(chan error, 1)
	go func() {
		select {
		case <-b.clientCtx.Done():
			logrus.Debugln("Build cancelled, killing and removing container:", cID)
			b.docker.ContainerKill(cID, 0)
			b.removeContainer(cID)
			cancelErrCh <- errCancelled
		case <-finished:
			cancelErrCh <- nil
		}
	}()

	if err := b.docker.ContainerStart(cID, nil, "", ""); err != nil {
		close(finished)
		if cancelErr := <-cancelErrCh; cancelErr != nil {
			logrus.Debugf("Build cancelled (%v) and got an error from ContainerStart: %v",
				cancelErr, err)
		}
		return err
	}

	// Block on reading output from container, stop on err or chan closed
	if err := <-errCh; err != nil {
		close(finished)
		if cancelErr := <-cancelErrCh; cancelErr != nil {
			logrus.Debugf("Build cancelled (%v) and got an error from errCh: %v",
				cancelErr, err)
		}
		return err
	}

	waitC, err := b.docker.ContainerWait(context.Background(), cID, false)
	if err != nil {
		// Unable to begin waiting for container.
		close(finished)
		if cancelErr := <-cancelErrCh; cancelErr != nil {
			logrus.Debugf("Build cancelled (%v) and unable to begin ContainerWait: %v", cancelErr, err)
		}
		return err
	}

	if status := <-waitC; status.ExitCode() != 0 {
		close(finished)
		if cancelErr := <-cancelErrCh; cancelErr != nil {
			logrus.Debugf("Build cancelled (%v) and got a non-zero code from ContainerWait: %d", cancelErr, status.ExitCode())
		}
		// TODO: change error type, because jsonmessage.JSONError assumes HTTP
		return &jsonmessage.JSONError{
			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(cmd, " "), status.ExitCode()),
			Code:    status.ExitCode(),
		}
	}

	close(finished)
	return <-cancelErrCh
}
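
// removeContainer force-removes an intermediate container together with its
// volumes, reporting any failure on the builder's output.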
func (b *Builder) removeContainer(c string) error {
	rmConfig := &types.ContainerRmConfig{
		ForceRemove:  true,
		RemoveVolume: true,
	}
	if err := b.docker.ContainerRm(c, rmConfig); err != nil {
		fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
		return err
	}
	return nil
}
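
// clearTmp removes all temporary containers created during the build,
// stopping at the first container that fails to be removed.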
func (b *Builder) clearTmp() {
	for c := range b.tmpContainers {
		if err := b.removeContainer(c); err != nil {
			return
		}
		delete(b.tmpContainers, c)
		fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c))
	}
}