internals.go

package builder

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/builder/parser"
	"github.com/docker/docker/daemon"
	"github.com/docker/docker/graph"
	imagepkg "github.com/docker/docker/image"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/httputils"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/parsers"
	"github.com/docker/docker/pkg/progressreader"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/symlink"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/tarsum"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
)
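
// readContext unpacks the build context passed to the builder: it
// decompresses the stream, wraps it in a tarsum so per-file hashes are
// available for cache checks, and untars it into a temporary directory
// recorded in b.contextPath.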
func (b *Builder) readContext(context io.Reader) error {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		return err
	}

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return err
	}

	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
		return err
	}

	if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
		return err
	}

	b.contextPath = tmpdirPath
	return nil
}
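
// commit records the current builder state as a new image layer. When id is
// empty it first probes the cache with a synthetic "#(nop)" command and, on a
// miss, creates a fresh container; otherwise it commits the given container
// and advances b.image to the new image ID.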
func (b *Builder) commit(id string, autoCmd *runconfig.Command, comment string) error {
	if b.disableCommit {
		return nil
	}
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.Config.Image = b.image
	if id == "" {
		cmd := b.Config.Cmd
		b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", "#(nop) "+comment)
		defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		if hit {
			return nil
		}

		container, err := b.create()
		if err != nil {
			return err
		}
		id = container.ID

		if err := container.Mount(); err != nil {
			return err
		}
		defer container.Unmount()
	}

	container, err := b.Daemon.Get(id)
	if err != nil {
		return err
	}

	// Note: Actually copy the struct
	autoConfig := *b.Config
	autoConfig.Cmd = autoCmd

	// Commit the container
	image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
	if err != nil {
		return err
	}
	b.image = image.ID
	return nil
}
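
// copyInfo describes a single ADD/COPY source: where it came from, where it
// should land, the hash used for cache look-ups, and any temporary directory
// that must be cleaned up afterwards.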
type copyInfo struct {
	origPath   string
	destPath   string
	hash       string
	decompress bool
	tmpDir     string
}
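
// runContextCommand implements ADD and COPY. It computes a copyInfo (and
// cache hash) for every source argument, probes the build cache with a
// synthetic "#(nop)" command, and only on a cache miss creates a temporary
// container and copies the sources into it before committing.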
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	dest := args[len(args)-1] // last one is always the dest

	copyInfos := []*copyInfo{}

	b.Config.Image = b.image

	defer func() {
		for _, ci := range copyInfos {
			if ci.tmpDir != "" {
				os.RemoveAll(ci.tmpDir)
			}
		}
	}()

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	for _, orig := range args[0 : len(args)-1] {
		if err := calcCopyInfo(
			b,
			cmdName,
			&copyInfos,
			orig,
			dest,
			allowRemote,
			allowDecompression,
		); err != nil {
			return err
		}
	}

	if len(copyInfos) == 0 {
		return fmt.Errorf("No source files were specified")
	}

	if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one CI then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(copyInfos) == 1 {
		srcHash = copyInfos[0].hash
		origPaths = copyInfos[0].origPath
	} else {
		var hashs []string
		var origs []string
		for _, ci := range copyInfos {
			hashs = append(hashs, ci.hash)
			origs = append(origs, ci.origPath)
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.Config.Cmd
	b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		return nil
	}

	container, _, err := b.Daemon.Create(b.Config, nil, "")
	if err != nil {
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	for _, ci := range copyInfos {
		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
			return err
		}
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
		return err
	}
	return nil
}
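
// calcCopyInfo resolves one ADD/COPY source into one or more copyInfo
// entries: remote URLs are downloaded into a temp dir inside the context,
// wildcards are expanded against the context tarsum, and local files or
// directories get a "file:" or "dir:" hash for cache look-ups.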
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "./")

	// Twiddle the destPath when it's a relative path - meaning, make it
	// relative to the working directory (WORKDIR)
	if !filepath.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, "/")
		destPath = filepath.Join("/", b.Config.WorkingDir, destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += "/"
		}
	}

	// In the remote/URL case, download it and generate its hashcode
	if urlutil.IsURL(origPath) {
		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := httputils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       b.OutOld,
			Formatter: b.StreamFormatter,
			Size:      int(resp.ContentLength),
			NewLines:  true,
			ID:        "",
			Action:    "Downloading",
		})); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present
		// Otherwise just remove atime and mtime
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero'
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		if err := system.UtimesNano(tmpFileName, times); err != nil {
			return err
		}

		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if ContainsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := path.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			// Propagate errors from the recursive call instead of silently
			// dropping them: a failure on any matched file aborts the copy.
			if err := calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression); err != nil {
				return err
			}
		}
		return nil
	}

	// Must be a dir or a file

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(path.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := path.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, "/") {
		absOrigPath += "/"
	}

	// Need path w/o / too to find matching dir w/o trailing /
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := path.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used. However, we'll exclude the
		// root dir itself. We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes. This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}
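
// ContainsWildcards reports whether name contains an unescaped glob
// metacharacter (*, ? or [), as understood by path.Match.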
func ContainsWildcards(name string) bool {
	for i := 0; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			return true
		}
	}
	return false
}
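
// pullImage pulls the named image (defaulting to the "latest" tag) using the
// builder's registry credentials and returns the resulting image.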
func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}

	pullRegistryAuth := b.AuthConfig
	if len(b.ConfigFile.AuthConfigs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote)
		if err != nil {
			return nil, err
		}
		resolvedAuth := registry.ResolveAuthConfig(b.ConfigFile, repoInfo.Index)
		pullRegistryAuth = &resolvedAuth
	}

	imagePullConfig := &graph.ImagePullConfig{
		Parallel:   true,
		AuthConfig: pullRegistryAuth,
		OutStream:  ioutils.NopWriteCloser(b.OutOld),
		Json:       b.StreamFormatter.Json(),
	}

	if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
		return nil, err
	}

	image, err := b.Daemon.Repositories().LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}
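
// processImageFrom applies a FROM image to the builder: it adopts the image's
// config, ensures a default PATH, and replays any ONBUILD triggers through
// the Dockerfile parser and dispatcher.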
func (b *Builder) processImageFrom(img *imagepkg.Image) error {
	b.image = img.ID

	if img.Config != nil {
		b.Config = img.Config
	}

	if len(b.Config.Env) == 0 {
		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.Config.OnBuild
	b.Config.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for stepN, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)

			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}

// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
// and if so attempts to look up the current `b.image` and `b.Config` pair
// in the current server `b.Daemon`. If an image is found, probeCache returns
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
// is any error, it returns `(false, err)`.
func (b *Builder) probeCache() (bool, error) {
	if !b.UtilizeCache || b.cacheBusted {
		return false, nil
	}

	cache, err := b.Daemon.ImageGetCached(b.image, b.Config)
	if err != nil {
		return false, err
	}
	if cache == nil {
		logrus.Debugf("[BUILDER] Cache miss")
		b.cacheBusted = true
		return false, nil
	}

	fmt.Fprintf(b.OutStream, " ---> Using cache\n")
	logrus.Debugf("[BUILDER] Use cached version")
	b.image = cache.ID
	return true, nil
}
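
// create makes a temporary build container from the current config and host
// resource limits, registers it in b.TmpContainers, and wires up the command
// it should run.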
func (b *Builder) create() (*daemon.Container, error) {
	if b.image == "" && !b.noBaseImage {
		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.Config.Image = b.image

	hostConfig := &runconfig.HostConfig{
		CpuShares:  b.cpuShares,
		CpuQuota:   b.cpuQuota,
		CpusetCpus: b.cpuSetCpus,
		CpusetMems: b.cpuSetMems,
		Memory:     b.memory,
		MemorySwap: b.memorySwap,
	}

	config := *b.Config

	// Create the container
	c, warnings, err := b.Daemon.Create(b.Config, hostConfig, "")
	if err != nil {
		return nil, err
	}
	for _, warning := range warnings {
		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
	}

	b.TmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID))

	if config.Cmd.Len() > 0 {
		// override the entry point that may have been picked up from the base image
		s := config.Cmd.Slice()
		c.Path = s[0]
		c.Args = s[1:]
	} else {
		config.Cmd = runconfig.NewCommand()
	}

	return c, nil
}
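
// run starts the given container, streams its output when the build is
// verbose, honours build cancellation by killing the container, and returns a
// JSONError if the command exits non-zero.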
func (b *Builder) run(c *daemon.Container) error {
	var errCh chan error
	if b.Verbose {
		errCh = c.Attach(nil, b.OutStream, b.ErrStream)
	}

	// start the container
	if err := c.Start(); err != nil {
		return err
	}

	finished := make(chan struct{})
	defer close(finished)
	go func() {
		select {
		case <-b.cancelled:
			logrus.Debugln("Build cancelled, killing container:", c.ID)
			c.Kill()
		case <-finished:
		}
	}()

	if b.Verbose {
		// Block on reading output from container, stop on err or chan closed
		if err := <-errCh; err != nil {
			return err
		}
	}

	// Wait for it to finish
	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
		return &jsonmessage.JSONError{
			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
			Code:    ret,
		}
	}

	return nil
}
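
// checkPathForAddition verifies that orig resolves (after following symlinks)
// to an existing path inside the build context.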
func (b *Builder) checkPathForAddition(orig string) error {
	origPath := path.Join(b.contextPath, orig)
	origPath, err := filepath.EvalSymlinks(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	if !strings.HasPrefix(origPath, b.contextPath) {
		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
	}
	if _, err := os.Stat(origPath); err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	return nil
}
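
// addContext copies orig from the build context into dest inside the
// container's root filesystem, optionally unpacking archives, and then fixes
// ownership on whatever was created.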
func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = path.Join(b.contextPath, orig)
		destPath   = path.Join(container.RootfsPath(), dest)
	)

	if destPath != container.RootfsPath() {
		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
		if err != nil {
			return err
		}
	}

	// Preserve the trailing '/'
	if strings.HasSuffix(dest, "/") || dest == "." {
		destPath = destPath + "/"
	}

	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		destExists = false
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}

	if fi.IsDir() {
		return copyAsDirectory(origPath, destPath, destExists)
	}

	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive
		// to support the untar feature we need to clean up the path a little bit
		// because tar is very forgiving. First we need to strip off the archive's
		// filename from the path but this is only added if it does not end in / .
		tarDest := destPath
		if strings.HasSuffix(tarDest, "/") {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig
		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}

	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
		return err
	}

	resPath := destPath
	if destExists && destStat.IsDir() {
		resPath = path.Join(destPath, path.Base(origPath))
	}

	return fixPermissions(origPath, resPath, 0, 0, destExists)
}
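
// copyAsDirectory copies a directory tree into the container and resets
// ownership on the files it created.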
func copyAsDirectory(source, destination string, destExisted bool) error {
	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
		return err
	}
	return fixPermissions(source, destination, 0, 0, destExisted)
}

func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
	// If the destination didn't already exist, or the destination isn't a
	// directory, then we should Lchown the destination. Otherwise, we shouldn't
	// Lchown the destination.
	destStat, err := os.Stat(destination)
	if err != nil {
		// This should *never* be reached, because the destination must've already
		// been created while untar-ing the context.
		return err
	}
	doChownDestination := !destExisted || !destStat.IsDir()

	// We Walk on the source rather than on the destination because we don't
	// want to change permissions on things we haven't created or modified.
	return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
		// Do not alter the walk root if it existed before, as it doesn't fall under
		// the domain of "things we should chown".
		if !doChownDestination && (source == fullpath) {
			return nil
		}

		// Path is prefixed by source: substitute with destination instead.
		cleaned, err := filepath.Rel(source, fullpath)
		if err != nil {
			return err
		}

		fullpath = path.Join(destination, cleaned)
		return os.Lchown(fullpath, uid, gid)
	})
}
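
// clearTmp removes the intermediate containers created during the build,
// along with their volumes.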
func (b *Builder) clearTmp() {
	for c := range b.TmpContainers {
		tmp, err := b.Daemon.Get(c)
		if err != nil {
			fmt.Fprint(b.OutStream, err.Error())
			// The container can't be looked up; skip it rather than passing a
			// nil container to Rm below.
			continue
		}

		if err := b.Daemon.Rm(tmp); err != nil {
			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
			return
		}
		b.Daemon.DeleteVolumes(tmp.VolumePaths())
		delete(b.TmpContainers, c)
		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
	}
}