package builder

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"syscall"
	"time"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/docker/builder/parser"
	"github.com/docker/docker/daemon"
	imagepkg "github.com/docker/docker/image"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/parsers"
	"github.com/docker/docker/pkg/symlink"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/tarsum"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/utils"
)

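// readContext decompresses the incoming build context, wraps it in a tarsum
// reader so per-file checksums are available for cache lookups, and unpacks
// it into a temporary directory recorded in b.contextPath.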
func (b *Builder) readContext(context io.Reader) error {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		return err
	}

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return err
	}

	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
		return err
	}

	if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
		return err
	}

	b.contextPath = tmpdirPath
	return nil
}

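// commit captures the state of container id as a new image and points b.image
// at it. When id is empty, a throwaway "#(nop)" container is created (or a
// cached image is reused) solely to carry the commit.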
func (b *Builder) commit(id string, autoCmd []string, comment string) error {
	if b.image == "" {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.Config.Image = b.image
	if id == "" {
		cmd := b.Config.Cmd
		b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
		defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		if hit {
			return nil
		}

		container, err := b.create()
		if err != nil {
			return err
		}
		id = container.ID

		if err := container.Mount(); err != nil {
			return err
		}
		defer container.Unmount()
	}

	container := b.Daemon.Get(id)
	if container == nil {
		return fmt.Errorf("An error occurred while creating the container")
	}

	// Note: Actually copy the struct
	autoConfig := *b.Config
	autoConfig.Cmd = autoCmd

	// Commit the container
	image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
	if err != nil {
		return err
	}
	b.image = image.ID
	return nil
}

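// copyInfo describes a single source handled by a context copy command (ADD
// or COPY): where it came from, where it is going, the hash used for cache
// lookups, and any temporary directory that must be cleaned up afterwards.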
type copyInfo struct {
	origPath   string
	destPath   string
	hash       string
	decompress bool
	tmpDir     string
}

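// runContextCommand is the shared implementation behind the context copy
// commands (ADD and COPY). It resolves every source into a copyInfo, probes
// the build cache with a hash of the sources, and, on a miss, copies the
// files into a temporary container which is then committed.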
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	dest := args[len(args)-1] // last one is always the dest

	copyInfos := []*copyInfo{}

	b.Config.Image = b.image

	defer func() {
		for _, ci := range copyInfos {
			if ci.tmpDir != "" {
				os.RemoveAll(ci.tmpDir)
			}
		}
	}()

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	for _, orig := range args[0 : len(args)-1] {
		err := calcCopyInfo(b, cmdName, &copyInfos, orig, dest, allowRemote, allowDecompression)
		if err != nil {
			return err
		}
	}

	if len(copyInfos) == 0 {
		return fmt.Errorf("No source files were specified")
	}

	if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one CI then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(copyInfos) == 1 {
		srcHash = copyInfos[0].hash
		origPaths = copyInfos[0].origPath
	} else {
		var hashs []string
		var origs []string
		for _, ci := range copyInfos {
			hashs = append(hashs, ci.hash)
			origs = append(origs, ci.origPath)
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.Config.Cmd
	b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)}
	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	// If we do not have at least one hash, never use the cache
	if hit && b.UtilizeCache {
		return nil
	}

	container, _, err := b.Daemon.Create(b.Config, nil, "")
	if err != nil {
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	for _, ci := range copyInfos {
		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
			return err
		}
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
		return err
	}
	return nil
}

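// calcCopyInfo appends one or more copyInfo entries for origPath to cInfos.
// Remote URLs are downloaded into a temporary directory inside the context,
// wildcard patterns are expanded against the context's tarsum entries, and,
// when caching is enabled, a content hash is computed for each source.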
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "./")

	// In the remote/URL case, download it and gen its hashcode
	if urlutil.IsURL(origPath) {
		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := utils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present
		// Otherwise just remove atime and mtime
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero'
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		if err := system.UtimesNano(tmpFileName, times); err != nil {
			return err
		}

		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, only if we're using the cache
		if b.UtilizeCache {
			r, err := archive.Tar(tmpFileName, archive.Uncompressed)
			if err != nil {
				return err
			}
			tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
			if err != nil {
				return err
			}
			if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
				return err
			}
			ci.hash = tarSum.Sum(nil)
			r.Close()
		}

		return nil
	}

	// Deal with wildcards
	if ContainsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := path.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression)
		}
		return nil
	}

	// Must be a dir or a file

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(path.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// If not using cache don't need to do anything else.
	// If we are using a cache then calc the hash for the src file/dir
	if !b.UtilizeCache {
		return nil
	}

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := path.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, "/") {
		absOrigPath += "/"
	}

	// Need path w/o / too to find matching dir w/o trailing /
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := path.Join(b.contextPath, fileInfo.Name())
		if strings.HasPrefix(absFile, absOrigPath) || absFile == absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}

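// ContainsWildcards reports whether name contains an unescaped glob
// metacharacter (*, ? or [) as understood by path.Match.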
func ContainsWildcards(name string) bool {
	for i := 0; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			return true
		}
	}
	return false
}

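// pullImage pulls the named image (defaulting to the "latest" tag), resolving
// registry credentials from the builder's auth configuration, and returns the
// resulting image from the daemon's repositories.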
func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}

	pullRegistryAuth := b.AuthConfig
	if len(b.AuthConfigFile.Configs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		endpoint, _, err := registry.ResolveRepositoryName(remote)
		if err != nil {
			return nil, err
		}
		resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(endpoint)
		pullRegistryAuth = &resolvedAuth
	}

	job := b.Engine.Job("pull", remote, tag)
	job.SetenvBool("json", b.StreamFormatter.Json())
	job.SetenvBool("parallel", true)
	job.SetenvJson("authConfig", pullRegistryAuth)
	job.Stdout.Add(b.OutOld)

	if err := job.Run(); err != nil {
		return nil, err
	}

	image, err := b.Daemon.Repositories().LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}

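// processImageFrom makes img the base of the build: it adopts the image's
// config, ensures a default PATH, then copies the image's ONBUILD triggers,
// removes them from the config so they are not re-committed, and dispatches
// each trigger in turn.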
func (b *Builder) processImageFrom(img *imagepkg.Image) error {
	b.image = img.ID

	if img.Config != nil {
		b.Config = img.Config
	}

	if len(b.Config.Env) == 0 {
		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.Config.OnBuild
	b.Config.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for stepN, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)

			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}

// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
// and if so attempts to look up the current `b.image` and `b.Config` pair
// in the current server `b.Daemon`. If an image is found, probeCache returns
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
// is any error, it returns `(false, err)`.
func (b *Builder) probeCache() (bool, error) {
	if b.UtilizeCache {
		if cache, err := b.Daemon.ImageGetCached(b.image, b.Config); err != nil {
			return false, err
		} else if cache != nil {
			fmt.Fprintf(b.OutStream, " ---> Using cache\n")
			log.Debugf("[BUILDER] Use cached version")
			b.image = cache.ID
			return true, nil
		} else {
			log.Debugf("[BUILDER] Cache miss")
		}
	}
	return false, nil
}

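// create builds a new temporary container from the current b.Config, records
// it in b.TmpContainers, and sets its Path and Args from the configured Cmd,
// overriding any entrypoint inherited from the base image.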
func (b *Builder) create() (*daemon.Container, error) {
	if b.image == "" {
		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.Config.Image = b.image

	config := *b.Config

	// Create the container
	c, warnings, err := b.Daemon.Create(b.Config, nil, "")
	if err != nil {
		return nil, err
	}
	for _, warning := range warnings {
		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
	}

	b.TmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))

	// override the entry point that may have been picked up from the base image
	c.Path = config.Cmd[0]
	c.Args = config.Cmd[1:]

	return c, nil
}

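// run starts the given container, streams its logs when the build is verbose,
// and waits for it to exit, returning a JSONError carrying the exit code if
// the command finished with a non-zero status.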
func (b *Builder) run(c *daemon.Container) error {
	// start the container
	if err := c.Start(); err != nil {
		return err
	}

	if b.Verbose {
		logsJob := b.Engine.Job("logs", c.ID)
		logsJob.Setenv("follow", "1")
		logsJob.Setenv("stdout", "1")
		logsJob.Setenv("stderr", "1")
		logsJob.Stdout.Add(b.OutStream)
		logsJob.Stderr.Set(b.ErrStream)
		if err := logsJob.Run(); err != nil {
			return err
		}
	}

	// Wait for it to finish
	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
		err := &utils.JSONError{
			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
			Code:    ret,
		}
		return err
	}

	return nil
}

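// checkPathForAddition verifies that orig exists inside the build context and
// that, after resolving symlinks, it does not escape the context directory.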
func (b *Builder) checkPathForAddition(orig string) error {
	origPath := path.Join(b.contextPath, orig)
	origPath, err := filepath.EvalSymlinks(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	if !strings.HasPrefix(origPath, b.contextPath) {
		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
	}
	if _, err := os.Stat(origPath); err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	return nil
}

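// addContext copies orig from the build context into the container at dest,
// resolving the destination within the container's rootfs. Directories are
// copied recursively, archives are unpacked in place when decompress is true,
// and anything else is copied as a plain file before permissions are fixed up.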
func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = path.Join(b.contextPath, orig)
		destPath   = path.Join(container.RootfsPath(), dest)
	)

	if destPath != container.RootfsPath() {
		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
		if err != nil {
			return err
		}
	}

	// Preserve the trailing '/'
	if strings.HasSuffix(dest, "/") || dest == "." {
		destPath = destPath + "/"
	}

	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		destExists = false
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}

	if fi.IsDir() {
		return copyAsDirectory(origPath, destPath, destExists)
	}

	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive
		// to support the untar feature we need to clean up the path a little bit
		// because tar is very forgiving. First we need to strip off the archive's
		// filename from the path but this is only added if it does not end in / .
		tarDest := destPath
		if strings.HasSuffix(tarDest, "/") {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig
		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}

	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
		return err
	}

	resPath := destPath
	if destExists && destStat.IsDir() {
		resPath = path.Join(destPath, path.Base(origPath))
	}

	return fixPermissions(origPath, resPath, 0, 0, destExists)
}

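// copyAsDirectory copies the source directory tree into destination and then
// resets ownership on the copied files to root (uid/gid 0).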
func copyAsDirectory(source, destination string, destExisted bool) error {
	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
		return err
	}
	return fixPermissions(source, destination, 0, 0, destExisted)
}

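// fixPermissions chowns everything that was copied from source into
// destination to uid:gid, skipping the destination root itself when it
// already existed as a directory before the copy.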
func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
	// If the destination didn't already exist, or the destination isn't a
	// directory, then we should Lchown the destination. Otherwise, we shouldn't
	// Lchown the destination.
	destStat, err := os.Stat(destination)
	if err != nil {
		// This should *never* be reached, because the destination must've already
		// been created while untar-ing the context.
		return err
	}
	doChownDestination := !destExisted || !destStat.IsDir()

	// We Walk on the source rather than on the destination because we don't
	// want to change permissions on things we haven't created or modified.
	return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
		// Do not alter the walk root iff. it existed before, as it doesn't fall under
		// the domain of "things we should chown".
		if !doChownDestination && (source == fullpath) {
			return nil
		}

		// Path is prefixed by source: substitute with destination instead.
		cleaned, err := filepath.Rel(source, fullpath)
		if err != nil {
			return err
		}

		fullpath = path.Join(destination, cleaned)
		return os.Lchown(fullpath, uid, gid)
	})
}

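// clearTmp removes every intermediate container created during the build,
// along with its volumes, reporting each removal on the builder's output
// stream and stopping at the first failure.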
func (b *Builder) clearTmp() {
	for c := range b.TmpContainers {
		tmp := b.Daemon.Get(c)
		if err := b.Daemon.Destroy(tmp); err != nil {
			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
			return
		}
		b.Daemon.DeleteVolumes(tmp.VolumePaths())
		delete(b.TmpContainers, c)
		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
	}
}