package builder

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/builder/parser"
	"github.com/docker/docker/cliconfig"
	"github.com/docker/docker/context"
	"github.com/docker/docker/daemon"
	"github.com/docker/docker/graph"
	"github.com/docker/docker/image"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/httputils"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/parsers"
	"github.com/docker/docker/pkg/progressreader"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/stringutils"
	"github.com/docker/docker/pkg/symlink"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/tarsum"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
)
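
// readContext decompresses the build context stream, wraps it in a tarsum so
// per-file hashes are available for cache look-ups, and unpacks it into a
// temporary directory that becomes b.contextPath.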
func (b *builder) readContext(context io.Reader) (err error) {
	tmpdirPath, err := getTempDir("", "docker-build")
	if err != nil {
		return
	}

	// Make sure we clean-up upon error. In the happy case the caller
	// is expected to manage the clean-up
	defer func() {
		if err != nil {
			if e := os.RemoveAll(tmpdirPath); e != nil {
				logrus.Debugf("[BUILDER] failed to remove temporary context: %s", e)
			}
		}
	}()

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return
	}

	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version1); err != nil {
		return
	}

	if err = chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
		return
	}

	b.contextPath = tmpdirPath
	return
}
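
// commit records the current b.Config as a new image layer. If id is empty, a
// temporary no-op container is created (or a cached image reused) so the
// comment appears in the image history; the committed image uses autoCmd as
// its Cmd and becomes the builder's new b.image.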
func (b *builder) commit(ctx context.Context, id string, autoCmd *stringutils.StrSlice, comment string) error {
	if b.disableCommit {
		return nil
	}
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.Config.Image = b.image
	if id == "" {
		cmd := b.Config.Cmd
		if runtime.GOOS != "windows" {
			b.Config.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", "#(nop) "+comment)
		} else {
			b.Config.Cmd = stringutils.NewStrSlice("cmd", "/S /C", "REM (nop) "+comment)
		}
		defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)

		hit, err := b.probeCache(ctx)
		if err != nil {
			return err
		}
		if hit {
			return nil
		}

		container, err := b.create(ctx)
		if err != nil {
			return err
		}
		id = container.ID

		if err := container.Mount(ctx); err != nil {
			return err
		}
		defer container.Unmount(ctx)
	}

	container, err := b.Daemon.Get(ctx, id)
	if err != nil {
		return err
	}

	// Note: Actually copy the struct
	autoConfig := *b.Config
	autoConfig.Cmd = autoCmd

	commitCfg := &daemon.ContainerCommitConfig{
		Author: b.maintainer,
		Pause:  true,
		Config: &autoConfig,
	}

	// Commit the container
	image, err := b.Daemon.Commit(ctx, container, commitCfg)
	if err != nil {
		return err
	}
	b.Daemon.Graph().Retain(b.id, image.ID)
	b.activeImages = append(b.activeImages, image.ID)
	b.image = image.ID
	return nil
}
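
// copyInfo describes a single source of an ADD/COPY instruction: where it
// comes from, where it goes, the hash used for cache look-ups, whether it may
// be decompressed, and any temporary directory that must be cleaned up.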
type copyInfo struct {
	origPath   string
	destPath   string
	hash       string
	decompress bool
	tmpDir     string
}
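
// runContextCommand implements the shared logic behind ADD and COPY: it
// resolves every source into a copyInfo, derives a cache key from the source
// hashes and the destination, and, on a cache miss, creates a temporary
// container, copies the sources into it, and commits the result.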
func (b *builder) runContextCommand(ctx context.Context, args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	copyInfos := []*copyInfo{}

	b.Config.Image = b.image

	defer func() {
		for _, ci := range copyInfos {
			if ci.tmpDir != "" {
				os.RemoveAll(ci.tmpDir)
			}
		}
	}()

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	for _, orig := range args[0 : len(args)-1] {
		if err := calcCopyInfo(
			b,
			cmdName,
			&copyInfos,
			orig,
			dest,
			allowRemote,
			allowDecompression,
			true,
		); err != nil {
			return err
		}
	}

	if len(copyInfos) == 0 {
		return fmt.Errorf("No source files were specified")
	}

	if len(copyInfos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one CI then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(copyInfos) == 1 {
		srcHash = copyInfos[0].hash
		origPaths = copyInfos[0].origPath
	} else {
		var hashs []string
		var origs []string
		for _, ci := range copyInfos {
			hashs = append(hashs, ci.hash)
			origs = append(origs, ci.origPath)
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.Config.Cmd
	if runtime.GOOS != "windows" {
		b.Config.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	} else {
		b.Config.Cmd = stringutils.NewStrSlice("cmd", "/S /C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
	}
	defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)

	hit, err := b.probeCache(ctx)
	if err != nil {
		return err
	}
	if hit {
		return nil
	}

	ccr, err := b.Daemon.ContainerCreate(ctx, "", b.Config, nil, true)
	if err != nil {
		return err
	}

	container, err := b.Daemon.Get(ctx, ccr.ID)
	if err != nil {
		return err
	}

	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(ctx); err != nil {
		return err
	}
	defer container.Unmount(ctx)

	for _, ci := range copyInfos {
		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
			return err
		}
	}

	if err := b.commit(ctx, container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
		return err
	}
	return nil
}
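
// calcCopyInfo resolves one ADD/COPY source into one or more copyInfo entries
// appended to cInfos. Remote URLs are downloaded into a temp dir under the
// context, wildcards are expanded against the context's tarsum entries, and
// local files and directories get a content-based hash for cache look-ups.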
func calcCopyInfo(b *builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {

	// Work in daemon-specific OS filepath semantics. However, we save
	// the origPath passed in here, as it might also be a URL which
	// we need to check for in this function.
	passedInOrigPath := origPath
	origPath = filepath.FromSlash(origPath)
	destPath = filepath.FromSlash(destPath)

	if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	// Twiddle the destPath when it's a relative path - meaning, make it
	// relative to the WORKDIR
	if !system.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator))
		destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.Config.WorkingDir), destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += string(os.PathSeparator)
		}
	}

	// In the remote/URL case, download it and gen its hashcode
	if urlutil.IsURL(passedInOrigPath) {

		// As it's a URL, we go back to processing on what was passed in
		// to this function
		origPath = passedInOrigPath

		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := httputils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := getTempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := filepath.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       b.OutOld,
			Formatter: b.StreamFormatter,
			Size:      resp.ContentLength,
			NewLines:  true,
			ID:        "",
			Action:    "Downloading",
		})); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present
		// Otherwise just remove atime and mtime
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero'
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		// Windows does not support UtimesNano.
		if runtime.GOOS != "windows" {
			if err := system.UtimesNano(tmpFileName, times); err != nil {
				return err
			}
		}

		ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, string(os.PathSeparator)) {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := filepath.FromSlash(u.Path) // Ensure in platform semantics
			if strings.HasSuffix(path, string(os.PathSeparator)) {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, string(os.PathSeparator))
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := filepath.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false)
		}
		return nil
	}

	// Must be a dir or a file
	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(filepath.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := filepath.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, string(os.PathSeparator)) {
		absOrigPath += string(os.PathSeparator)
	}

	// Need path w/o slash too to find matching dir w/o trailing slash
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := filepath.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used. However, we'll exclude the
		// root dir itself. We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes. This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}
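
// containsWildcards reports whether name contains an unescaped glob
// metacharacter (*, ? or [); a backslash escapes the character that follows.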
func containsWildcards(name string) bool {
	for i := 0; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			return true
		}
	}
	return false
}
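
// pullImage pulls the named image (defaulting to the "latest" tag), resolving
// registry credentials from the auth configs supplied with the build request,
// and returns the image after looking it up locally.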
func (b *builder) pullImage(ctx context.Context, name string) (*image.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}

	pullRegistryAuth := &cliconfig.AuthConfig{}
	if len(b.AuthConfigs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote)
		if err != nil {
			return nil, err
		}

		resolvedConfig := registry.ResolveAuthConfig(
			&cliconfig.ConfigFile{AuthConfigs: b.AuthConfigs},
			repoInfo.Index,
		)
		pullRegistryAuth = &resolvedConfig
	}

	imagePullConfig := &graph.ImagePullConfig{
		AuthConfig: pullRegistryAuth,
		OutStream:  ioutils.NopWriteCloser(b.OutOld),
	}

	if err := b.Daemon.Repositories().Pull(ctx, remote, tag, imagePullConfig); err != nil {
		return nil, err
	}

	image, err := b.Daemon.Repositories().LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}
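
// processImageFrom handles a FROM image: it adopts the image's config as the
// builder's starting config, sets a default PATH when the image has no
// environment, and then parses and dispatches any ONBUILD triggers inherited
// from the base image.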
func (b *builder) processImageFrom(ctx context.Context, img *image.Image) error {
	b.image = img.ID

	if img.Config != nil {
		b.Config = img.Config
	}

	// The default path will be blank on Windows (set by HCS)
	if len(b.Config.Env) == 0 && daemon.DefaultPathEnv != "" {
		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
		word := "trigger"
		if nTriggers > 1 {
			word = "triggers"
		}
		fmt.Fprintf(b.ErrStream, "# Executing %d build %s...\n", nTriggers, word)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.Config.OnBuild
	b.Config.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for _, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			if err := b.dispatch(ctx, i, n); err != nil {
				return err
			}
		}
	}

	return nil
}

// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
// and if so attempts to look up the current `b.image` and `b.Config` pair
// in the current server `b.Daemon`. If an image is found, probeCache returns
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
// is any error, it returns `(false, err)`.
func (b *builder) probeCache(ctx context.Context) (bool, error) {
	if !b.UtilizeCache || b.cacheBusted {
		return false, nil
	}

	cache, err := b.Daemon.ImageGetCached(ctx, b.image, b.Config)
	if err != nil {
		return false, err
	}
	if cache == nil {
		logrus.Debugf("[BUILDER] Cache miss")
		b.cacheBusted = true
		return false, nil
	}

	fmt.Fprintf(b.OutStream, " ---> Using cache\n")
	logrus.Debugf("[BUILDER] Use cached version")
	b.image = cache.ID
	b.Daemon.Graph().Retain(b.id, cache.ID)
	b.activeImages = append(b.activeImages, cache.ID)
	return true, nil
}
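
// create makes a temporary container from the current b.Config and the
// builder's resource limits, registers it in b.TmpContainers, and sets the
// container's command from the configured Cmd, overriding any entrypoint
// inherited from the base image.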
func (b *builder) create(ctx context.Context) (*daemon.Container, error) {
	if b.image == "" && !b.noBaseImage {
		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.Config.Image = b.image

	hostConfig := &runconfig.HostConfig{
		CPUShares:    b.cpuShares,
		CPUPeriod:    b.cpuPeriod,
		CPUQuota:     b.cpuQuota,
		CpusetCpus:   b.cpuSetCpus,
		CpusetMems:   b.cpuSetMems,
		CgroupParent: b.cgroupParent,
		Memory:       b.memory,
		MemorySwap:   b.memorySwap,
		Ulimits:      b.ulimits,
	}

	config := *b.Config

	// Create the container
	ccr, err := b.Daemon.ContainerCreate(ctx, "", b.Config, hostConfig, true)
	if err != nil {
		return nil, err
	}
	for _, warning := range ccr.Warnings {
		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
	}

	c, err := b.Daemon.Get(ctx, ccr.ID)
	if err != nil {
		return nil, err
	}

	b.TmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID))

	if config.Cmd.Len() > 0 {
		// override the entry point that may have been picked up from the base image
		s := config.Cmd.Slice()
		c.Path = s[0]
		c.Args = s[1:]
	} else {
		config.Cmd = stringutils.NewStrSlice()
	}

	return c, nil
}
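
// run starts the given container, optionally streaming its output when the
// build is verbose, kills it if the build is cancelled, and reports a
// non-zero exit code as a JSON error.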
func (b *builder) run(ctx context.Context, c *daemon.Container) error {
	var errCh chan error
	if b.Verbose {
		errCh = c.Attach(nil, b.OutStream, b.ErrStream)
	}

	// Start the container
	if err := c.Start(ctx); err != nil {
		return err
	}

	finished := make(chan struct{})
	defer close(finished)
	go func() {
		select {
		case <-b.cancelled:
			logrus.Debugln("Build cancelled, killing container:", c.ID)
			c.Kill(ctx)
		case <-finished:
		}
	}()

	if b.Verbose {
		// Block on reading output from container, stop on err or chan closed
		if err := <-errCh; err != nil {
			return err
		}
	}

	// Wait for it to finish
	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
		return &jsonmessage.JSONError{
			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.Config.Cmd.ToString(), ret),
			Code:    ret,
		}
	}

	return nil
}
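
// checkPathForAddition verifies that an ADD/COPY source exists and, after
// resolving symlinks, still lives inside the build context.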
func (b *builder) checkPathForAddition(orig string) error {
	origPath := filepath.Join(b.contextPath, orig)
	origPath, err := symlink.EvalSymlinks(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	contextPath, err := symlink.EvalSymlinks(b.contextPath)
	if err != nil {
		return err
	}
	if !strings.HasPrefix(origPath, contextPath) {
		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
	}
	if _, err := os.Stat(origPath); err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	return nil
}
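
// addContext copies a single source from the build context into the given
// container: directories are copied recursively, local archives are untarred
// in place when decompress is true, and everything else is copied as a file
// before ownership is fixed up to root:root.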
func (b *builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = filepath.Join(b.contextPath, orig)
		destPath   string
	)

	// Work in daemon-local OS specific file paths
	dest = filepath.FromSlash(dest)

	destPath, err = container.GetResourcePath(dest)
	if err != nil {
		return err
	}

	// Preserve the trailing slash
	if strings.HasSuffix(dest, string(os.PathSeparator)) || dest == "." {
		destPath = destPath + string(os.PathSeparator)
	}

	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err)
			return err
		}
		destExists = false
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}

	if fi.IsDir() {
		return copyAsDirectory(origPath, destPath, destExists)
	}

	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive
		// to support the untar feature we need to clean up the path a little bit
		// because tar is very forgiving. First we need to strip off the archive's
		// filename from the path but this is only added if it does not end in slash
		tarDest := destPath
		if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig
		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}

	if err := system.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
		return err
	}

	resPath := destPath
	if destExists && destStat.IsDir() {
		resPath = filepath.Join(destPath, filepath.Base(origPath))
	}

	return fixPermissions(origPath, resPath, 0, 0, destExists)
}
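
// copyAsDirectory recursively copies a directory tree into the container and
// then normalizes ownership on the result.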
func copyAsDirectory(source, destination string, destExisted bool) error {
	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
		return err
	}
	return fixPermissions(source, destination, 0, 0, destExisted)
}
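
// clearTmp force-removes every temporary container created during the build
// and clears it from b.TmpContainers, stopping at the first removal error.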
func (b *builder) clearTmp(ctx context.Context) {
	for c := range b.TmpContainers {
		rmConfig := &daemon.ContainerRmConfig{
			ForceRemove:  true,
			RemoveVolume: true,
		}

		if err := b.Daemon.ContainerRm(ctx, c, rmConfig); err != nil {
			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
			return
		}

		delete(b.TmpContainers, c)
		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
	}
}