internals.go

package builder

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/builder/parser"
	"github.com/docker/docker/daemon"
	"github.com/docker/docker/graph"
	imagepkg "github.com/docker/docker/image"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/httputils"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/parsers"
	"github.com/docker/docker/pkg/progressreader"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/symlink"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/tarsum"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/docker/docker/runconfig"
)

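// readContext decompresses the build context stream, wraps it in a tarsum so
// that per-file checksums are available for cache lookups, and unpacks it into
// a temporary directory that is recorded as b.contextPath.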
func (b *Builder) readContext(context io.Reader) error {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		return err
	}
	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return err
	}
	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
		return err
	}
	if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
		return err
	}
	b.contextPath = tmpdirPath
	return nil
}

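// commit snapshots the current b.Config as a new image and advances b.image to
// its ID. When id is empty, a throwaway container whose command is a "#(nop)"
// comment is created (or the step is satisfied from the build cache) purely to
// capture the configuration; otherwise the given container is committed as-is.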
func (b *Builder) commit(id string, autoCmd *runconfig.Command, comment string) error {
	if b.disableCommit {
		return nil
	}
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.Config.Image = b.image
	if id == "" {
		cmd := b.Config.Cmd
		b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", "#(nop) "+comment)
		defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		if hit {
			return nil
		}
		container, err := b.create()
		if err != nil {
			return err
		}
		id = container.ID
		if err := container.Mount(); err != nil {
			return err
		}
		defer container.Unmount()
	}
	container, err := b.Daemon.Get(id)
	if err != nil {
		return err
	}

	// Note: Actually copy the struct
	autoConfig := *b.Config
	autoConfig.Cmd = autoCmd

	// Commit the container
	image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
	if err != nil {
		return err
	}
	b.image = image.ID
	return nil
}

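// copyInfo describes a single ADD/COPY source: its path inside the context (or
// downloaded temp dir), its destination, the hash used for cache lookups,
// whether it may be decompressed on the way in, and any temporary directory
// that must be cleaned up afterwards.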
type copyInfo struct {
	origPath   string
	destPath   string
	hash       string
	decompress bool
	tmpDir     string
}

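// runContextCommand is the shared implementation behind ADD and COPY. It
// resolves every source argument into one or more copyInfo entries, derives a
// cache key from their hashes, and, on a cache miss, creates a temporary
// container, copies the sources into it, and commits the result.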
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}
	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	dest := args[len(args)-1] // last one is always the dest
	copyInfos := []*copyInfo{}

	b.Config.Image = b.image

	defer func() {
		for _, ci := range copyInfos {
			if ci.tmpDir != "" {
				os.RemoveAll(ci.tmpDir)
			}
		}
	}()

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	for _, orig := range args[0 : len(args)-1] {
		err := calcCopyInfo(b, cmdName, &copyInfos, orig, dest, allowRemote, allowDecompression)
		if err != nil {
			return err
		}
	}

	if len(copyInfos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one copyInfo then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(copyInfos) == 1 {
		srcHash = copyInfos[0].hash
		origPaths = copyInfos[0].origPath
	} else {
		var hashs []string
		var origs []string
		for _, ci := range copyInfos {
			hashs = append(hashs, ci.hash)
			origs = append(origs, ci.origPath)
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.Config.Cmd
	b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		return nil
	}

	container, _, err := b.Daemon.Create(b.Config, nil, "")
	if err != nil {
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	for _, ci := range copyInfos {
		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
			return err
		}
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
		return err
	}
	return nil
}

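// calcCopyInfo resolves a single source argument into one or more copyInfo
// entries appended to cInfos. Remote URLs are downloaded into a temporary
// directory inside the context; wildcards are expanded against the context's
// tarsum entries; plain files use their tarsum, and directories get a hash
// computed over the sums of all files beneath them.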
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "./")

	// Twiddle the destPath when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !filepath.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, "/")
		destPath = filepath.Join("/", b.Config.WorkingDir, destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += "/"
		}
	}

	// In the remote/URL case, download it and gen its hashcode
	if urlutil.IsURL(origPath) {
		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := httputils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       b.OutOld,
			Formatter: b.StreamFormatter,
			Size:      int(resp.ContentLength),
			NewLines:  true,
			ID:        "",
			Action:    "Downloading",
		})); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present
		// Otherwise just remove atime and mtime
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero'
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		if err := system.UtimesNano(tmpFileName, times); err != nil {
			return err
		}

		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if ContainsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := path.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			// Recurse for each matching context entry; any failure aborts the build.
			if err := calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression); err != nil {
				return err
			}
		}
		return nil
	}

	// Must be a dir or a file
	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(path.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := path.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, "/") {
		absOrigPath += "/"
	}

	// Need path w/o / too to find matching dir w/o trailing /
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := path.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used. However, we'll exclude the
		// root dir itself. We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes. This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}

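// ContainsWildcards reports whether name contains an unescaped glob
// metacharacter ('*', '?' or '[') as understood by path.Match. A backslash
// escapes the character that follows it, so:
//
//	ContainsWildcards("foo*.txt")  // true
//	ContainsWildcards(`foo\*.txt`) // false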
func ContainsWildcards(name string) bool {
	for i := 0; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			return true
		}
	}
	return false
}

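// pullImage pulls name (defaulting the tag to "latest") through the daemon's
// registry service, preferring credentials from the builder's full auth config
// file when one was supplied, and returns the resulting local image.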
func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}

	pullRegistryAuth := b.AuthConfig
	if len(b.ConfigFile.AuthConfigs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote)
		if err != nil {
			return nil, err
		}
		resolvedAuth := b.ConfigFile.ResolveAuthConfig(repoInfo.Index)
		pullRegistryAuth = &resolvedAuth
	}

	imagePullConfig := &graph.ImagePullConfig{
		Parallel:   true,
		AuthConfig: pullRegistryAuth,
		OutStream:  ioutils.NopWriteCloser(b.OutOld),
		Json:       b.StreamFormatter.Json(),
	}

	if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
		return nil, err
	}

	image, err := b.Daemon.Repositories().LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}

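// processImageFrom makes img the build's base image: it adopts the image's
// config, ensures a default PATH is present when the environment is empty, and
// then replays any ONBUILD triggers recorded on the image, dispatching each one
// as if it were a Dockerfile step.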
func (b *Builder) processImageFrom(img *imagepkg.Image) error {
	b.image = img.ID

	if img.Config != nil {
		b.Config = img.Config
	}

	if len(b.Config.Env) == 0 {
		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.Config.OnBuild
	b.Config.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for stepN, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)

			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}

// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
// and if so attempts to look up the current `b.image` and `b.Config` pair
// in the current server `b.Daemon`. If an image is found, probeCache returns
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
// is any error, it returns `(false, err)`.
func (b *Builder) probeCache() (bool, error) {
	if !b.UtilizeCache || b.cacheBusted {
		return false, nil
	}

	cache, err := b.Daemon.ImageGetCached(b.image, b.Config)
	if err != nil {
		return false, err
	}
	if cache == nil {
		logrus.Debugf("[BUILDER] Cache miss")
		b.cacheBusted = true
		return false, nil
	}

	fmt.Fprintf(b.OutStream, " ---> Using cache\n")
	logrus.Debugf("[BUILDER] Use cached version")
	b.image = cache.ID
	return true, nil
}

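// create builds a temporary container from the current b.Config and the
// builder's resource limits (CPU shares, cpusets, memory) for a RUN step,
// registers it in b.TmpContainers, and overrides any entrypoint inherited from
// the base image with the configured command. The caller starts it via run.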
func (b *Builder) create() (*daemon.Container, error) {
	if b.image == "" && !b.noBaseImage {
		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.Config.Image = b.image

	hostConfig := &runconfig.HostConfig{
		CpuShares:  b.cpuShares,
		CpusetCpus: b.cpuSetCpus,
		CpusetMems: b.cpuSetMems,
		Memory:     b.memory,
		MemorySwap: b.memorySwap,
	}

	config := *b.Config

	// Create the container
	c, warnings, err := b.Daemon.Create(b.Config, hostConfig, "")
	if err != nil {
		return nil, err
	}
	for _, warning := range warnings {
		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
	}

	b.TmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID))

	if config.Cmd.Len() > 0 {
		// override the entry point that may have been picked up from the base image
		s := config.Cmd.Slice()
		c.Path = s[0]
		c.Args = s[1:]
	} else {
		config.Cmd = runconfig.NewCommand()
	}

	return c, nil
}

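// run starts c, streams its output when the build is verbose, kills the
// container if the build is cancelled, and waits for it to exit. A non-zero
// exit code is surfaced as a jsonmessage.JSONError so callers can report it.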
func (b *Builder) run(c *daemon.Container) error {
	var errCh chan error
	if b.Verbose {
		errCh = c.Attach(nil, b.OutStream, b.ErrStream)
	}

	// start the container
	if err := c.Start(); err != nil {
		return err
	}

	finished := make(chan struct{})
	defer close(finished)
	go func() {
		select {
		case <-b.cancelled:
			logrus.Debugln("Build cancelled, killing container:", c.ID)
			c.Kill()
		case <-finished:
		}
	}()

	if b.Verbose {
		// Block on reading output from container, stop on err or chan closed
		if err := <-errCh; err != nil {
			return err
		}
	}

	// Wait for it to finish
	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
		return &jsonmessage.JSONError{
			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
			Code:    ret,
		}
	}

	return nil
}

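// checkPathForAddition verifies that orig, resolved through any symlinks,
// refers to an existing path that stays inside the build context.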
func (b *Builder) checkPathForAddition(orig string) error {
	origPath := path.Join(b.contextPath, orig)
	origPath, err := filepath.EvalSymlinks(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	if !strings.HasPrefix(origPath, b.contextPath) {
		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
	}
	if _, err := os.Stat(origPath); err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	return nil
}

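// addContext copies orig (a path inside the build context) into the given
// container at dest. Directories are copied wholesale; archives are unpacked
// in place when decompress is true; everything else is copied as a plain file,
// after which ownership of the copied tree is normalised to root.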
func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = path.Join(b.contextPath, orig)
		destPath   = path.Join(container.RootfsPath(), dest)
	)

	if destPath != container.RootfsPath() {
		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
		if err != nil {
			return err
		}
	}

	// Preserve the trailing '/'
	if strings.HasSuffix(dest, "/") || dest == "." {
		destPath = destPath + "/"
	}

	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		destExists = false
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}

	if fi.IsDir() {
		return copyAsDirectory(origPath, destPath, destExists)
	}

	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive
		// to support the untar feature we need to clean up the path a little bit
		// because tar is very forgiving. First we need to strip off the archive's
		// filename from the path but this is only added if it does not end in / .
		tarDest := destPath
		if strings.HasSuffix(tarDest, "/") {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig
		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}

	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
		return err
	}

	resPath := destPath
	if destExists && destStat.IsDir() {
		resPath = path.Join(destPath, path.Base(origPath))
	}

	return fixPermissions(origPath, resPath, 0, 0, destExists)
}

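// copyAsDirectory copies an entire directory tree from source to destination
// and then normalises ownership on everything it created.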
func copyAsDirectory(source, destination string, destExisted bool) error {
	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
		return err
	}
	return fixPermissions(source, destination, 0, 0, destExisted)
}

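// fixPermissions walks the copied source tree and chowns the corresponding
// paths under destination to uid:gid, leaving a pre-existing destination
// directory itself untouched.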
func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
	// If the destination didn't already exist, or the destination isn't a
	// directory, then we should Lchown the destination. Otherwise, we shouldn't
	// Lchown the destination.
	destStat, err := os.Stat(destination)
	if err != nil {
		// This should *never* be reached, because the destination must've already
		// been created while untar-ing the context.
		return err
	}
	doChownDestination := !destExisted || !destStat.IsDir()

	// We Walk on the source rather than on the destination because we don't
	// want to change permissions on things we haven't created or modified.
	return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
		// Do not alter the walk root iff. it existed before, as it doesn't fall under
		// the domain of "things we should chown".
		if !doChownDestination && (source == fullpath) {
			return nil
		}

		// Path is prefixed by source: substitute with destination instead.
		cleaned, err := filepath.Rel(source, fullpath)
		if err != nil {
			return err
		}

		fullpath = path.Join(destination, cleaned)
		return os.Lchown(fullpath, uid, gid)
	})
}

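// clearTmp removes every intermediate container recorded in b.TmpContainers,
// along with its volumes, reporting each removal on the build output stream.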
func (b *Builder) clearTmp() {
	for c := range b.TmpContainers {
		tmp, err := b.Daemon.Get(c)
		if err != nil {
			fmt.Fprint(b.OutStream, err.Error())
			continue
		}

		if err := b.Daemon.Rm(tmp); err != nil {
			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
			return
		}
		b.Daemon.DeleteVolumes(tmp.VolumePaths())
		delete(b.TmpContainers, c)
		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
	}
}