internals.go

package builder

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/builder/parser"
	"github.com/docker/docker/daemon"
	"github.com/docker/docker/graph"
	imagepkg "github.com/docker/docker/image"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/httputils"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/parsers"
	"github.com/docker/docker/pkg/progressreader"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/symlink"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/tarsum"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
)
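
// readContext decompresses the incoming build context, wraps it in a tarsum
// reader so per-file checksums are available for cache lookups, and untars it
// into a temporary directory recorded in b.contextPath.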
func (b *Builder) readContext(context io.Reader) error {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		return err
	}

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return err
	}

	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
		return err
	}

	if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
		return err
	}

	b.contextPath = tmpdirPath
	return nil
}
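
// commit records the builder's current state as a new image. When id is
// empty, it probes the build cache and, on a miss, creates a temporary
// container running a no-op command built from comment; the committed image
// gets autoCmd as its default command and becomes the new b.image.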
func (b *Builder) commit(id string, autoCmd *runconfig.Command, comment string) error {
	if b.disableCommit {
		return nil
	}
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.Config.Image = b.image
	if id == "" {
		cmd := b.Config.Cmd
		b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", "#(nop) "+comment)
		defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		if hit {
			return nil
		}

		container, err := b.create()
		if err != nil {
			return err
		}
		id = container.ID

		if err := container.Mount(); err != nil {
			return err
		}
		defer container.Unmount()
	}

	container, err := b.Daemon.Get(id)
	if err != nil {
		return err
	}

	// Note: Actually copy the struct
	autoConfig := *b.Config
	autoConfig.Cmd = autoCmd

	// Commit the container
	image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
	if err != nil {
		return err
	}
	b.image = image.ID
	return nil
}
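
// copyInfo describes a single source for an ADD or COPY instruction: where it
// came from, where it should land in the container, the hash used for cache
// lookups, whether archives should be decompressed, and any temporary
// directory that must be cleaned up afterwards.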
type copyInfo struct {
	origPath   string
	destPath   string
	hash       string
	decompress bool
	tmpDir     string
}
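
// runContextCommand implements ADD and COPY. It resolves every source into a
// copyInfo, probes the build cache using a hash of the sources, and, on a
// miss, creates a temporary container, copies the sources into it, and
// commits the result.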
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	dest := args[len(args)-1] // last one is always the dest

	copyInfos := []*copyInfo{}

	b.Config.Image = b.image

	defer func() {
		for _, ci := range copyInfos {
			if ci.tmpDir != "" {
				os.RemoveAll(ci.tmpDir)
			}
		}
	}()

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	for _, orig := range args[0 : len(args)-1] {
		err := calcCopyInfo(b, cmdName, &copyInfos, orig, dest, allowRemote, allowDecompression)
		if err != nil {
			return err
		}
	}

	if len(copyInfos) == 0 {
		return fmt.Errorf("No source files were specified")
	}

	if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one CI then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(copyInfos) == 1 {
		srcHash = copyInfos[0].hash
		origPaths = copyInfos[0].origPath
	} else {
		var hashs []string
		var origs []string
		for _, ci := range copyInfos {
			hashs = append(hashs, ci.hash)
			origs = append(origs, ci.origPath)
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.Config.Cmd
	b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		return nil
	}

	container, _, err := b.Daemon.Create(b.Config, nil, "")
	if err != nil {
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	for _, ci := range copyInfos {
		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
			return err
		}
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
		return err
	}
	return nil
}
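
// calcCopyInfo resolves a single ADD/COPY source into one or more copyInfo
// entries appended to cInfos. Remote URLs are downloaded into a temporary
// directory inside the context, wildcards are expanded against the context's
// tarsum entries, and local files and directories get a content hash suitable
// for cache lookups.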
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "./")

	// Twiddle the destPath when it's a relative path - meaning, make it
	// relative to the WORKDIR
	if !filepath.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, "/")
		destPath = filepath.Join("/", b.Config.WorkingDir, destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += "/"
		}
	}

	// In the remote/URL case, download it and gen its hashcode
	if urlutil.IsURL(origPath) {
		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := httputils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       b.OutOld,
			Formatter: b.StreamFormatter,
			Size:      int(resp.ContentLength),
			NewLines:  true,
			ID:        "",
			Action:    "Downloading",
		})); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present
		// Otherwise just remove atime and mtime
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero'
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		if err := system.UtimesNano(tmpFileName, times); err != nil {
			return err
		}

		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if ContainsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := path.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			if err := calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression); err != nil {
				return err
			}
		}
		return nil
	}

	// Must be a dir or a file
	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	// checkPathForAddition has already verified the path exists.
	fi, _ := os.Stat(path.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := path.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, "/") {
		absOrigPath += "/"
	}

	// Need path w/o / too to find matching dir w/o trailing /
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := path.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used. However, we'll exclude the
		// root dir itself. We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes. This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}
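
// ContainsWildcards reports whether name contains an unescaped glob
// metacharacter (*, ? or [) as understood by path.Match.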
func ContainsWildcards(name string) bool {
	for i := 0; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			return true
		}
	}
	return false
}
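
// pullImage pulls the named image (defaulting to the "latest" tag) from its
// registry, resolving credentials from the builder's auth configuration, and
// returns the resulting image.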
func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}

	pullRegistryAuth := b.AuthConfig
	if len(b.ConfigFile.AuthConfigs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote)
		if err != nil {
			return nil, err
		}
		resolvedAuth := registry.ResolveAuthConfig(b.ConfigFile, repoInfo.Index)
		pullRegistryAuth = &resolvedAuth
	}

	imagePullConfig := &graph.ImagePullConfig{
		Parallel:   true,
		AuthConfig: pullRegistryAuth,
		OutStream:  ioutils.NopWriteCloser(b.OutOld),
		Json:       b.StreamFormatter.Json(),
	}

	if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
		return nil, err
	}

	image, err := b.Daemon.Repositories().LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}
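
// processImageFrom makes img the builder's base image: it adopts the image's
// config, seeds a default PATH when the environment is empty, and replays any
// ONBUILD triggers recorded on the image.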
func (b *Builder) processImageFrom(img *imagepkg.Image) error {
	b.image = img.ID

	if img.Config != nil {
		b.Config = img.Config
	}

	if len(b.Config.Env) == 0 {
		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.Config.OnBuild
	b.Config.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for stepN, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)

			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}

// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
// and if so attempts to look up the current `b.image` and `b.Config` pair
// in the current server `b.Daemon`. If an image is found, probeCache returns
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
// is any error, it returns `(false, err)`.
func (b *Builder) probeCache() (bool, error) {
	if !b.UtilizeCache || b.cacheBusted {
		return false, nil
	}

	cache, err := b.Daemon.ImageGetCached(b.image, b.Config)
	if err != nil {
		return false, err
	}
	if cache == nil {
		logrus.Debugf("[BUILDER] Cache miss")
		b.cacheBusted = true
		return false, nil
	}

	fmt.Fprintf(b.OutStream, " ---> Using cache\n")
	logrus.Debugf("[BUILDER] Use cached version")
	b.image = cache.ID
	return true, nil
}
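
// create builds (but does not start) a container from the current b.Config
// and the builder's resource limits, registers it as a temporary container,
// and sets the command/entrypoint that run will execute.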
func (b *Builder) create() (*daemon.Container, error) {
	if b.image == "" && !b.noBaseImage {
		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.Config.Image = b.image

	hostConfig := &runconfig.HostConfig{
		CpuShares:  b.cpuShares,
		CpusetCpus: b.cpuSetCpus,
		CpusetMems: b.cpuSetMems,
		Memory:     b.memory,
		MemorySwap: b.memorySwap,
	}

	config := *b.Config

	// Create the container
	c, warnings, err := b.Daemon.Create(b.Config, hostConfig, "")
	if err != nil {
		return nil, err
	}
	for _, warning := range warnings {
		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
	}

	b.TmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID))

	if config.Cmd.Len() > 0 {
		// override the entry point that may have been picked up from the base image
		s := config.Cmd.Slice()
		c.Path = s[0]
		c.Args = s[1:]
	} else {
		config.Cmd = runconfig.NewCommand()
	}

	return c, nil
}
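
// run starts the given container, optionally streaming its output, kills it
// if the build is cancelled, and returns a JSON error if the command exits
// with a non-zero status.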
func (b *Builder) run(c *daemon.Container) error {
	var errCh chan error
	if b.Verbose {
		errCh = c.Attach(nil, b.OutStream, b.ErrStream)
	}

	// start the container
	if err := c.Start(); err != nil {
		return err
	}

	finished := make(chan struct{})
	defer close(finished)
	go func() {
		select {
		case <-b.cancelled:
			logrus.Debugln("Build cancelled, killing container:", c.ID)
			c.Kill()
		case <-finished:
		}
	}()

	if b.Verbose {
		// Block on reading output from container, stop on err or chan closed
		if err := <-errCh; err != nil {
			return err
		}
	}

	// Wait for it to finish
	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
		return &jsonmessage.JSONError{
			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
			Code:    ret,
		}
	}

	return nil
}
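
// checkPathForAddition verifies that orig resolves (after following symlinks)
// to an existing path inside the build context.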
func (b *Builder) checkPathForAddition(orig string) error {
	origPath := path.Join(b.contextPath, orig)
	origPath, err := filepath.EvalSymlinks(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	if !strings.HasPrefix(origPath, b.contextPath) {
		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
	}
	if _, err := os.Stat(origPath); err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	return nil
}
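
// addContext copies orig from the build context into the container's root
// filesystem at dest. Directories are copied recursively, archives are
// optionally unpacked in place, and ownership of anything newly created is
// fixed up afterwards.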
func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = path.Join(b.contextPath, orig)
		destPath   = path.Join(container.RootfsPath(), dest)
	)

	if destPath != container.RootfsPath() {
		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
		if err != nil {
			return err
		}
	}

	// Preserve the trailing '/'
	if strings.HasSuffix(dest, "/") || dest == "." {
		destPath = destPath + "/"
	}

	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		destExists = false
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}

	if fi.IsDir() {
		return copyAsDirectory(origPath, destPath, destExists)
	}

	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive
		// to support the untar feature we need to clean up the path a little bit
		// because tar is very forgiving. First we need to strip off the archive's
		// filename from the path but this is only added if it does not end in / .
		tarDest := destPath
		if strings.HasSuffix(tarDest, "/") {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig
		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}

	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
		return err
	}

	resPath := destPath
	if destExists && destStat.IsDir() {
		resPath = path.Join(destPath, path.Base(origPath))
	}

	return fixPermissions(origPath, resPath, 0, 0, destExists)
}
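
// copyAsDirectory copies source into destination as a directory tree and
// then normalizes ownership on everything that was copied.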
func copyAsDirectory(source, destination string, destExisted bool) error {
	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
		return err
	}
	return fixPermissions(source, destination, 0, 0, destExisted)
}
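
// fixPermissions chowns everything copied from source into destination to the
// given uid/gid, skipping the destination root itself when it already existed
// as a directory before the copy.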
func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
	// If the destination didn't already exist, or the destination isn't a
	// directory, then we should Lchown the destination. Otherwise, we shouldn't
	// Lchown the destination.
	destStat, err := os.Stat(destination)
	if err != nil {
		// This should *never* be reached, because the destination must've already
		// been created while untar-ing the context.
		return err
	}
	doChownDestination := !destExisted || !destStat.IsDir()

	// We Walk on the source rather than on the destination because we don't
	// want to change permissions on things we haven't created or modified.
	return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
		// Do not alter the walk root iff. it existed before, as it doesn't fall under
		// the domain of "things we should chown".
		if !doChownDestination && (source == fullpath) {
			return nil
		}

		// Path is prefixed by source: substitute with destination instead.
		cleaned, err := filepath.Rel(source, fullpath)
		if err != nil {
			return err
		}

		fullpath = path.Join(destination, cleaned)
		return os.Lchown(fullpath, uid, gid)
	})
}
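
// clearTmp removes all intermediate containers created during the build,
// along with their volumes, logging a line for each one that is deleted.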
func (b *Builder) clearTmp() {
	for c := range b.TmpContainers {
		tmp, err := b.Daemon.Get(c)
		if err != nil {
			// Skip containers we can no longer look up instead of passing a
			// nil container to Rm.
			fmt.Fprint(b.OutStream, err.Error())
			continue
		}

		if err := b.Daemon.Rm(tmp); err != nil {
			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
			return
		}

		b.Daemon.DeleteVolumes(tmp.VolumePaths())
		delete(b.TmpContainers, c)
		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
	}
}