internals.go

package builder

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/builder/parser"
	"github.com/docker/docker/cliconfig"
	"github.com/docker/docker/daemon"
	"github.com/docker/docker/graph"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/httputils"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/parsers"
	"github.com/docker/docker/pkg/progressreader"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/tarsum"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/runconfig"
)
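
// readContext decompresses the incoming build context stream, wraps it in a
// tarsum so per-file hashes are available for cache look-ups, and unpacks it
// into a temporary directory that becomes b.contextPath.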
func (b *Builder) readContext(context io.Reader) error {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		return err
	}

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return err
	}

	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version1); err != nil {
		return err
	}

	if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
		return err
	}

	b.contextPath = tmpdirPath
	return nil
}
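
// commit records the current b.Config as a new image layer and updates
// b.image to the resulting image ID. When id is empty, Cmd is temporarily
// replaced with a no-op command built from comment, the cache is probed,
// and a throwaway container is created on a cache miss.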
func (b *Builder) commit(id string, autoCmd *runconfig.Command, comment string) error {
	if b.disableCommit {
		return nil
	}
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.Config.Image = b.image
	if id == "" {
		cmd := b.Config.Cmd
		if runtime.GOOS != "windows" {
			b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", "#(nop) "+comment)
		} else {
			b.Config.Cmd = runconfig.NewCommand("cmd", "/S /C", "REM (nop) "+comment)
		}
		defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		if hit {
			return nil
		}

		container, err := b.create()
		if err != nil {
			return err
		}
		id = container.ID

		if err := container.Mount(); err != nil {
			return err
		}
		defer container.Unmount()
	}

	container, err := b.Daemon.Get(id)
	if err != nil {
		return err
	}

	// Note: Actually copy the struct
	autoConfig := *b.Config
	autoConfig.Cmd = autoCmd

	commitCfg := &daemon.ContainerCommitConfig{
		Author: b.maintainer,
		Pause:  true,
		Config: &autoConfig,
	}

	// Commit the container
	image, err := b.Daemon.Commit(container, commitCfg)
	if err != nil {
		return err
	}
	b.image = image.ID
	return nil
}
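
// copyInfo describes a single ADD/COPY source: its origin and destination
// paths, the hash used for cache look-ups, whether it may be decompressed,
// and any temporary directory that must be removed afterwards.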
type copyInfo struct {
	origPath   string
	destPath   string
	hash       string
	decompress bool
	tmpDir     string
}
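
// runContextCommand implements ADD and COPY. It resolves every source
// argument into copyInfos, probes the build cache with a hash derived from
// the sources, and on a cache miss creates a temporary container, copies
// each source into it, and commits the result.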
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	copyInfos := []*copyInfo{}

	b.Config.Image = b.image

	defer func() {
		for _, ci := range copyInfos {
			if ci.tmpDir != "" {
				os.RemoveAll(ci.tmpDir)
			}
		}
	}()

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	for _, orig := range args[0 : len(args)-1] {
		if err := calcCopyInfo(
			b,
			cmdName,
			&copyInfos,
			orig,
			dest,
			allowRemote,
			allowDecompression,
			true,
		); err != nil {
			return err
		}
	}

	if len(copyInfos) == 0 {
		return fmt.Errorf("No source files were specified")
	}

	if len(copyInfos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one CI then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(copyInfos) == 1 {
		srcHash = copyInfos[0].hash
		origPaths = copyInfos[0].origPath
	} else {
		var hashs []string
		var origs []string
		for _, ci := range copyInfos {
			hashs = append(hashs, ci.hash)
			origs = append(origs, ci.origPath)
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.Config.Cmd
	if runtime.GOOS != "windows" {
		b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	} else {
		b.Config.Cmd = runconfig.NewCommand("cmd", "/S /C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
	}
	defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}

	if hit {
		return nil
	}

	container, _, err := b.Daemon.Create(b.Config, nil, "")
	if err != nil {
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	if err := container.PrepareStorage(); err != nil {
		return err
	}

	for _, ci := range copyInfos {
		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
			return err
		}
	}

	if err := container.CleanupStorage(); err != nil {
		return err
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
		return err
	}

	return nil
}
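
// calcCopyInfo appends one or more copyInfo entries for origPath to cInfos.
// Remote URLs are downloaded into a temp dir inside the context, wildcards
// are expanded against the context tarsum, single files are hashed from
// their tarsum entry, and directories are hashed from the sums of their
// nested files.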
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {

	// Work in daemon-specific OS filepath semantics. However, we save
	// the origPath passed in here, as it might also be a URL which
	// we need to check for in this function.
	passedInOrigPath := origPath
	origPath = filepath.FromSlash(origPath)
	destPath = filepath.FromSlash(destPath)

	if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	// Twiddle the destPath when it's a relative path - meaning, make it
	// relative to the WORKDIR
	if !filepath.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator))
		destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.Config.WorkingDir), destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += string(os.PathSeparator)
		}
	}

	// In the remote/URL case, download it and gen its hashcode
	if urlutil.IsURL(passedInOrigPath) {

		// As it's a URL, we go back to processing on what was passed in
		// to this function
		origPath = passedInOrigPath

		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := httputils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := filepath.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       b.OutOld,
			Formatter: b.StreamFormatter,
			Size:      int(resp.ContentLength),
			NewLines:  true,
			ID:        "",
			Action:    "Downloading",
		})); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present
		// Otherwise just remove atime and mtime
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero'
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		if err := system.UtimesNano(tmpFileName, times); err != nil {
			return err
		}

		ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, string(os.PathSeparator)) {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			// URL paths always use forward slashes, regardless of the
			// daemon's OS path separator.
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if allowWildcards && ContainsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := filepath.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			if err := calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false); err != nil {
				return err
			}
		}
		return nil
	}

	// Must be a dir or a file

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(filepath.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := filepath.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, string(os.PathSeparator)) {
		absOrigPath += string(os.PathSeparator)
	}

	// Need path w/o slash too to find matching dir w/o trailing slash
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := filepath.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used. However, we'll exclude the
		// root dir itself. We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes. This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}
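
// ContainsWildcards reports whether name contains an unescaped
// filepath.Match wildcard character (*, ? or [).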
func ContainsWildcards(name string) bool {
	for i := 0; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			return true
		}
	}
	return false
}
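
// pullImage pulls name (defaulting to the "latest" tag), using any auth
// configuration supplied with the build, and returns the resulting image.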
func (b *Builder) pullImage(name string) (*graph.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}

	pullRegistryAuth := &cliconfig.AuthConfig{}
	if len(b.AuthConfigs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote)
		if err != nil {
			return nil, err
		}

		resolvedConfig := registry.ResolveAuthConfig(
			&cliconfig.ConfigFile{AuthConfigs: b.AuthConfigs},
			repoInfo.Index,
		)
		pullRegistryAuth = &resolvedConfig
	}

	imagePullConfig := &graph.ImagePullConfig{
		AuthConfig: pullRegistryAuth,
		OutStream:  ioutils.NopWriteCloser(b.OutOld),
	}

	if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
		return nil, err
	}

	image, err := b.Daemon.Repositories().LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}
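
// processImageFrom makes img the base image of the build: it adopts the
// image's config, ensures a default PATH is set, and then replays any
// ONBUILD triggers stored in the image.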
func (b *Builder) processImageFrom(img *graph.Image) error {
	b.image = img.ID

	if img.Config != nil {
		b.Config = img.Config
	}

	// The default path will be blank on Windows (set by HCS)
	if len(b.Config.Env) == 0 && daemon.DefaultPathEnv != "" {
		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.Config.OnBuild
	b.Config.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for stepN, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)

			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}

// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
// and if so attempts to look up the current `b.image` and `b.Config` pair
// in the current server `b.Daemon`. If an image is found, probeCache returns
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
// is any error, it returns `(false, err)`.
func (b *Builder) probeCache() (bool, error) {
	if !b.UtilizeCache || b.cacheBusted {
		return false, nil
	}

	cache, err := b.Daemon.ImageGetCached(b.image, b.Config)
	if err != nil {
		return false, err
	}
	if cache == nil {
		logrus.Debugf("[BUILDER] Cache miss")
		b.cacheBusted = true
		return false, nil
	}

	fmt.Fprintf(b.OutStream, " ---> Using cache\n")
	logrus.Debugf("[BUILDER] Use cached version")
	b.image = cache.ID
	return true, nil
}
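
// create creates (but does not start) a temporary container from the current
// b.Config and the builder's resource limits, registering it in
// b.TmpContainers so it can be cleaned up later.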
func (b *Builder) create() (*daemon.Container, error) {
	if b.image == "" && !b.noBaseImage {
		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.Config.Image = b.image

	hostConfig := &runconfig.HostConfig{
		CpuShares:    b.cpuShares,
		CpuPeriod:    b.cpuPeriod,
		CpuQuota:     b.cpuQuota,
		CpusetCpus:   b.cpuSetCpus,
		CpusetMems:   b.cpuSetMems,
		CgroupParent: b.cgroupParent,
		Memory:       b.memory,
		MemorySwap:   b.memorySwap,
	}

	config := *b.Config

	// Create the container
	c, warnings, err := b.Daemon.Create(b.Config, hostConfig, "")
	if err != nil {
		return nil, err
	}
	for _, warning := range warnings {
		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
	}

	b.TmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID))

	if config.Cmd.Len() > 0 {
		// override the entry point that may have been picked up from the base image
		s := config.Cmd.Slice()
		c.Path = s[0]
		c.Args = s[1:]
	} else {
		config.Cmd = runconfig.NewCommand()
	}

	return c, nil
}
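
// run starts container c, optionally streaming its output when the build is
// verbose, kills it if the build is cancelled, and returns a JSONError if
// the command exits with a non-zero code.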
func (b *Builder) run(c *daemon.Container) error {
	var errCh chan error
	if b.Verbose {
		errCh = c.Attach(nil, b.OutStream, b.ErrStream)
	}

	// start the container
	if err := c.Start(); err != nil {
		return err
	}

	finished := make(chan struct{})
	defer close(finished)
	go func() {
		select {
		case <-b.cancelled:
			logrus.Debugln("Build cancelled, killing container:", c.ID)
			c.Kill()
		case <-finished:
		}
	}()

	if b.Verbose {
		// Block on reading output from container, stop on err or chan closed
		if err := <-errCh; err != nil {
			return err
		}
	}

	// Wait for it to finish
	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
		return &jsonmessage.JSONError{
			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.Config.Cmd.ToString(), ret),
			Code:    ret,
		}
	}

	return nil
}
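
// checkPathForAddition verifies that orig, after resolving symlinks, points
// to an existing path inside the build context.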
func (b *Builder) checkPathForAddition(orig string) error {
	origPath := filepath.Join(b.contextPath, orig)
	origPath, err := filepath.EvalSymlinks(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	contextPath, err := filepath.EvalSymlinks(b.contextPath)
	if err != nil {
		return err
	}
	if !strings.HasPrefix(origPath, contextPath) {
		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
	}
	if _, err := os.Stat(origPath); err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	return nil
}
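
// addContext copies orig from the build context into the container at dest.
// Directories are copied recursively, archives are unpacked when decompress
// is true, and otherwise the file is copied as-is; ownership on the result
// is reset via fixPermissions.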
func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = filepath.Join(b.contextPath, orig)
		destPath   string
	)

	// Work in daemon-local OS specific file paths
	dest = filepath.FromSlash(dest)

	destPath, err = container.GetResourcePath(dest)
	if err != nil {
		return err
	}

	// Preserve the trailing slash
	if strings.HasSuffix(dest, string(os.PathSeparator)) || dest == "." {
		destPath = destPath + string(os.PathSeparator)
	}

	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err)
			return err
		}
		destExists = false
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}

	if fi.IsDir() {
		return copyAsDirectory(origPath, destPath, destExists)
	}

	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive
		// to support the untar feature we need to clean up the path a little bit
		// because tar is very forgiving. First we need to strip off the archive's
		// filename from the path but this is only added if it does not end in slash
		tarDest := destPath
		if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig
		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}

	if err := system.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
		return err
	}

	resPath := destPath
	if destExists && destStat.IsDir() {
		resPath = filepath.Join(destPath, filepath.Base(origPath))
	}

	return fixPermissions(origPath, resPath, 0, 0, destExists)
}
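
// copyAsDirectory recursively copies source to destination and resets
// ownership on the result.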
func copyAsDirectory(source, destination string, destExisted bool) error {
	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
		return err
	}
	return fixPermissions(source, destination, 0, 0, destExisted)
}
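
// clearTmp force-removes all temporary containers created during the build,
// reporting each removal (or failure) on the output stream.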
func (b *Builder) clearTmp() {
	for c := range b.TmpContainers {
		rmConfig := &daemon.ContainerRmConfig{
			ForceRemove:  true,
			RemoveVolume: true,
		}

		if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
			return
		}
		delete(b.TmpContainers, c)
		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
	}
}