package dockerfile

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/dockerfile/parser"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/httputils"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/stringutils"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/tarsum"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/docker/docker/runconfig"
)

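// commit records the current builder state as a new image and stores the
// resulting image ID in b.image. The committed config is a copy of
// b.runConfig with its Cmd replaced by autoCmd. When id is empty, a
// temporary no-op container is created (honoring the build cache) so that
// there is something to commit.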
func (b *Builder) commit(id string, autoCmd *stringutils.StrSlice, comment string) error {
	if b.disableCommit {
		return nil
	}
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.runConfig.Image = b.image
	if id == "" {
		cmd := b.runConfig.Cmd
		if runtime.GOOS != "windows" {
			b.runConfig.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", "#(nop) "+comment)
		} else {
			b.runConfig.Cmd = stringutils.NewStrSlice("cmd", "/S /C", "REM (nop) "+comment)
		}
		defer func(cmd *stringutils.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		} else if hit {
			return nil
		}
		id, err = b.create()
		if err != nil {
			return err
		}
	}

	// Note: Actually copy the struct
	autoConfig := *b.runConfig
	autoConfig.Cmd = autoCmd

	commitCfg := &types.ContainerCommitConfig{
		Author: b.maintainer,
		Pause:  true,
		Config: &autoConfig,
	}

	// Commit the container
	imageID, err := b.docker.Commit(id, commitCfg)
	if err != nil {
		return err
	}
	b.image = imageID
	return nil
}

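// copyInfo pairs a source file (from the build context or downloaded from a
// URL) with a flag saying whether it should be decompressed when copied
// into the container.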
type copyInfo struct {
	builder.FileInfo
	decompress bool
}

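// runContextCommand is the shared implementation used by the ADD and COPY
// dispatchers: it resolves every source argument (local paths, wildcard
// patterns and, when allowRemote is true, URLs), hashes the sources for
// cache probing, and copies them into a temporary container at dest before
// committing the result.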
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	b.runConfig.Image = b.image

	var infos []copyInfo

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	var err error
	for _, orig := range args[0 : len(args)-1] {
		var fi builder.FileInfo
		decompress := allowLocalDecompression
		if urlutil.IsURL(orig) {
			if !allowRemote {
				return fmt.Errorf("Source can't be a URL for %s", cmdName)
			}
			fi, err = b.download(orig)
			if err != nil {
				return err
			}
			defer os.RemoveAll(filepath.Dir(fi.Path()))
			decompress = false
			infos = append(infos, copyInfo{fi, decompress})
			continue
		}
		// not a URL
		subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true)
		if err != nil {
			return err
		}

		infos = append(infos, subInfos...)
	}

	if len(infos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one info then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(infos) == 1 {
		fi := infos[0].FileInfo
		origPaths = fi.Name()
		if hfi, ok := fi.(builder.Hashed); ok {
			srcHash = hfi.Hash()
		}
	} else {
		var hashs []string
		var origs []string
		for _, info := range infos {
			fi := info.FileInfo
			origs = append(origs, fi.Name())
			if hfi, ok := fi.(builder.Hashed); ok {
				hashs = append(hashs, hfi.Hash())
			}
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.runConfig.Cmd
	if runtime.GOOS != "windows" {
		b.runConfig.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	} else {
		b.runConfig.Cmd = stringutils.NewStrSlice("cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
	}
	defer func(cmd *stringutils.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

	if hit, err := b.probeCache(); err != nil {
		return err
	} else if hit {
		return nil
	}

	container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig})
	if err != nil {
		return err
	}
	b.tmpContainers[container.ID] = struct{}{}

	comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)

	// Twiddle the destination when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !system.IsAbs(dest) {
		hasSlash := strings.HasSuffix(dest, string(os.PathSeparator))
		dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest)

		// Make sure we preserve any trailing slash
		if hasSlash {
			dest += string(os.PathSeparator)
		}
	}

	for _, info := range infos {
		if err := b.docker.BuilderCopy(container.ID, dest, info.FileInfo, info.decompress); err != nil {
			return err
		}
	}

	return b.commit(container.ID, cmd, comment)
}

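// download fetches srcURL into a temporary directory, reporting progress on
// b.Stdout. The file's mtime is set from the Last-Modified header when
// present, and the returned HashedFileInfo carries the tarsum of the
// downloaded file so it can participate in cache look-ups.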
func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
	// get filename from URL
	u, err := url.Parse(srcURL)
	if err != nil {
		return
	}
	path := filepath.FromSlash(u.Path) // Ensure in platform semantics
	if strings.HasSuffix(path, string(os.PathSeparator)) {
		path = path[:len(path)-1]
	}
	parts := strings.Split(path, string(os.PathSeparator))
	filename := parts[len(parts)-1]
	if filename == "" {
		err = fmt.Errorf("cannot determine filename from url: %s", u)
		return
	}

	// Initiate the download
	resp, err := httputils.Download(srcURL)
	if err != nil {
		return
	}

	// Prepare file in a tmp dir
	tmpDir, err := ioutils.TempDir("", "docker-remote")
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			os.RemoveAll(tmpDir)
		}
	}()
	tmpFileName := filepath.Join(tmpDir, filename)
	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		return
	}

	stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter)
	progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true)
	progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")

	// Download and dump result to tmp file
	if _, err = io.Copy(tmpFile, progressReader); err != nil {
		tmpFile.Close()
		return
	}
	fmt.Fprintln(b.Stdout)

	// ignoring error because the file was already opened successfully
	tmpFileSt, err := tmpFile.Stat()
	if err != nil {
		return
	}
	tmpFile.Close()

	// Set the mtime to the Last-Modified header value if present
	// Otherwise just remove atime and mtime
	mTime := time.Time{}

	lastMod := resp.Header.Get("Last-Modified")
	if lastMod != "" {
		// If we can't parse it then just let it default to 'zero'
		// otherwise use the parsed time value
		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
			mTime = parsedMTime
		}
	}

	if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
		return
	}

	// Calc the checksum, even if we're using the cache
	r, err := archive.Tar(tmpFileName, archive.Uncompressed)
	if err != nil {
		return
	}
	tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
	if err != nil {
		return
	}
	if _, err = io.Copy(ioutil.Discard, tarSum); err != nil {
		return
	}
	hash := tarSum.Sum(nil)
	r.Close()
	return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil
}

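// calcCopyInfo resolves origPath within the build context into one or more
// copyInfo entries. Wildcard patterns are expanded by walking the context,
// single files are hashed with a "file:" prefix, and directories receive a
// "dir:" hash computed over the sorted hashes of their contents.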
func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) {
	// Work in daemon-specific OS filepath semantics
	origPath = filepath.FromSlash(origPath)

	if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath) {
		var copyInfos []copyInfo
		if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info.Name() == "" {
				// Why are we doing this check?
				return nil
			}
			if match, _ := filepath.Match(origPath, path); !match {
				return nil
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false)
			if err != nil {
				return err
			}
			copyInfos = append(copyInfos, subInfos...)
			return nil
		}); err != nil {
			return nil, err
		}
		return copyInfos, nil
	}

	// Must be a dir or a file
	statPath, fi, err := b.context.Stat(origPath)
	if err != nil {
		return nil, err
	}

	copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}}

	hfi, handleHash := fi.(builder.Hashed)
	if !handleHash {
		return copyInfos, nil
	}

	// Deal with the single file case
	if !fi.IsDir() {
		hfi.SetHash("file:" + hfi.Hash())
		return copyInfos, nil
	}

	// Must be a dir
	var subfiles []string
	err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// we already checked handleHash above
		subfiles = append(subfiles, info.(builder.Hashed).Hash())
		return nil
	})
	if err != nil {
		return nil, err
	}

	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil)))

	return copyInfos, nil
}

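// containsWildcards reports whether name contains an unescaped pattern
// metacharacter (*, ? or [); a backslash escapes the byte that follows it.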
func containsWildcards(name string) bool {
	for i := 0; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			return true
		}
	}
	return false
}

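// processImageFrom seeds the builder from the FROM image: it adopts the
// image's config as the new runConfig, ensures PATH has a default value
// when the environment is empty, and replays any ONBUILD triggers stored
// on the image, rejecting ONBUILD, MAINTAINER and FROM as trigger
// instructions.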
func (b *Builder) processImageFrom(img builder.Image) error {
	b.image = img.ID()

	if img.Config() != nil {
		b.runConfig = img.Config()
	}

	// The default path will be blank on Windows (set by HCS)
	if len(b.runConfig.Env) == 0 && system.DefaultPathEnv != "" {
		b.runConfig.Env = append(b.runConfig.Env, "PATH="+system.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 {
		word := "trigger"
		if nTriggers > 1 {
			word = "triggers"
		}
		fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.runConfig.OnBuild
	b.runConfig.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for _, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}

// probeCache checks if `b.docker` implements builder.ImageCache and image-caching
// is enabled (`b.UseCache`).
// If so, it attempts to look up the current `b.image` and `b.runConfig` pair with `b.docker`.
// If an image is found, probeCache returns `(true, nil)`.
// If no image is found, it returns `(false, nil)`.
// If there is any error, it returns `(false, err)`.
func (b *Builder) probeCache() (bool, error) {
	c, ok := b.docker.(builder.ImageCache)
	if !ok || !b.UseCache || b.cacheBusted {
		return false, nil
	}
	cache, err := c.GetCachedImage(b.image, b.runConfig)
	if err != nil {
		return false, err
	}
	if len(cache) == 0 {
		logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd)
		b.cacheBusted = true
		return false, nil
	}

	fmt.Fprintf(b.Stdout, " ---> Using cache\n")
	logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd)
	b.image = string(cache)

	return true, nil
}

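// create builds a HostConfig from the builder's resource and isolation
// settings, creates a temporary container from b.runConfig, registers it in
// b.tmpContainers and returns its ID. The container is created but not
// started; see run.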
func (b *Builder) create() (string, error) {
	if b.image == "" && !b.noBaseImage {
		return "", fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.runConfig.Image = b.image

	resources := runconfig.Resources{
		CgroupParent: b.CgroupParent,
		CPUShares:    b.CPUShares,
		CPUPeriod:    b.CPUPeriod,
		CPUQuota:     b.CPUQuota,
		CpusetCpus:   b.CPUSetCpus,
		CpusetMems:   b.CPUSetMems,
		Memory:       b.Memory,
		MemorySwap:   b.MemorySwap,
		Ulimits:      b.Ulimits,
	}

	// TODO: why not embed a hostconfig in builder?
	hostConfig := &runconfig.HostConfig{
		Isolation: b.Isolation,
		ShmSize:   b.ShmSize,
		Resources: resources,
	}

	config := *b.runConfig

	// Create the container
	c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{
		Config:     b.runConfig,
		HostConfig: hostConfig,
	})
	if err != nil {
		return "", err
	}
	for _, warning := range c.Warnings {
		fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning)
	}

	b.tmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID))

	if config.Cmd.Len() > 0 {
		// override the entry point that may have been picked up from the base image
		if err := b.docker.ContainerUpdateCmd(c.ID, config.Cmd.Slice()); err != nil {
			return "", err
		}
	}

	return c.ID, nil
}

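// run starts the container cID, streaming its output when b.Verbose is set.
// If the build is cancelled the container is killed and removed, and a
// non-zero exit code is converted into an error.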
func (b *Builder) run(cID string) (err error) {
	errCh := make(chan error)
	if b.Verbose {
		go func() {
			errCh <- b.docker.ContainerAttach(cID, nil, b.Stdout, b.Stderr, true)
		}()
	}

	finished := make(chan struct{})
	defer close(finished)
	go func() {
		select {
		case <-b.cancelled:
			logrus.Debugln("Build cancelled, killing and removing container:", cID)
			b.docker.ContainerKill(cID, 0)
			b.removeContainer(cID)
		case <-finished:
		}
	}()

	if err := b.docker.ContainerStart(cID, nil); err != nil {
		return err
	}

	if b.Verbose {
		// Block on reading output from container, stop on err or chan closed
		if err := <-errCh; err != nil {
			return err
		}
	}

	if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 {
		// TODO: change error type, because jsonmessage.JSONError assumes HTTP
		return &jsonmessage.JSONError{
			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.runConfig.Cmd.ToString(), ret),
			Code:    ret,
		}
	}

	return nil
}

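// removeContainer force-removes the intermediate container c along with its
// volumes, logging any error to b.Stdout before returning it.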
func (b *Builder) removeContainer(c string) error {
	rmConfig := &types.ContainerRmConfig{
		ForceRemove:  true,
		RemoveVolume: true,
	}
	if err := b.docker.ContainerRm(c, rmConfig); err != nil {
		fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
		return err
	}
	return nil
}

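// clearTmp removes all temporary containers created during the build,
// stopping at the first removal error.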
func (b *Builder) clearTmp() {
	for c := range b.tmpContainers {
		if err := b.removeContainer(c); err != nil {
			return
		}
		delete(b.tmpContainers, c)
		fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c))
	}
}

// readDockerfile reads a Dockerfile from the current context.
func (b *Builder) readDockerfile() error {
	// If no -f was specified then look for 'Dockerfile'. If we can't find
	// that then look for 'dockerfile'. If neither are found then default
	// back to 'Dockerfile' and use that in the error message.
	if b.DockerfileName == "" {
		b.DockerfileName = api.DefaultDockerfileName
		if _, _, err := b.context.Stat(b.DockerfileName); os.IsNotExist(err) {
			lowercase := strings.ToLower(b.DockerfileName)
			if _, _, err := b.context.Stat(lowercase); err == nil {
				b.DockerfileName = lowercase
			}
		}
	}

	f, err := b.context.Open(b.DockerfileName)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.DockerfileName)
		}
		return err
	}
	if f, ok := f.(*os.File); ok {
		// ignoring error because Open already succeeded
		fi, err := f.Stat()
		if err != nil {
			return fmt.Errorf("Unexpected error reading Dockerfile: %v", err)
		}
		if fi.Size() == 0 {
			return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.DockerfileName)
		}
	}
	b.dockerfile, err = parser.Parse(f)
	f.Close()
	if err != nil {
		return err
	}

	// After the Dockerfile has been parsed, we need to check the .dockerignore
	// file for either "Dockerfile" or ".dockerignore", and if either are
	// present then erase them from the build context. These files should never
	// have been sent from the client but we did send them to make sure that
	// we had the Dockerfile to actually parse, and then we also need the
	// .dockerignore file to know whether either file should be removed.
	// Note that this assumes the Dockerfile has been read into memory and
	// is now safe to be removed.
	if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok {
		dockerIgnore.Process([]string{b.DockerfileName})
	}

	return nil
}

// isBuildArgAllowed reports whether arg is one of the built-in build args
// or a user-defined ARG declared in the Dockerfile up to this point.
func (b *Builder) isBuildArgAllowed(arg string) bool {
	if _, ok := BuiltinAllowedBuildArgs[arg]; ok {
		return true
	}
	if _, ok := b.allowedBuildArgs[arg]; ok {
		return true
	}
	return false
}