internals.go

package dockerfile

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/remotecontext"
	"github.com/docker/docker/pkg/httputils"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/pkg/errors"
)

func (b *Builder) commit(dispatchState *dispatchState, comment string) error {
	if b.disableCommit {
		return nil
	}
	if !dispatchState.hasFromImage() {
		return errors.New("Please provide a source image with `from` prior to commit")
	}

	runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment))
	hit, err := b.probeCache(dispatchState, runConfigWithCommentCmd)
	if err != nil || hit {
		return err
	}
	id, err := b.create(runConfigWithCommentCmd)
	if err != nil {
		return err
	}

	return b.commitContainer(dispatchState, id, runConfigWithCommentCmd)
}

// TODO: see if any args can be dropped
func (b *Builder) commitContainer(dispatchState *dispatchState, id string, containerConfig *container.Config) error {
	if b.disableCommit {
		return nil
	}

	commitCfg := &backend.ContainerCommitConfig{
		ContainerCommitConfig: types.ContainerCommitConfig{
			Author: dispatchState.maintainer,
			Pause:  true,
			// TODO: this should be done by Commit()
			Config: copyRunConfig(dispatchState.runConfig),
		},
		ContainerConfig: containerConfig,
	}

	// Commit the container
	imageID, err := b.docker.Commit(id, commitCfg)
	if err != nil {
		return err
	}

	dispatchState.imageID = imageID
	b.imageContexts.update(imageID, dispatchState.runConfig)
	return nil
}

type copyInfo struct {
	root       string
	path       string
	hash       string
	decompress bool
}

// TODO: this needs to be split so that a Builder method doesn't accept req
func (b *Builder) runContextCommand(req dispatchRequest, allowRemote bool, allowLocalDecompression bool, cmdName string, imageSource *imageMount) error {
	args := req.args
	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	var infos []copyInfo

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	var err error
	for _, orig := range args[0 : len(args)-1] {
		if urlutil.IsURL(orig) {
			if !allowRemote {
				return fmt.Errorf("Source can't be a URL for %s", cmdName)
			}
			remote, path, err := b.download(orig)
			if err != nil {
				return err
			}
			defer os.RemoveAll(remote.Root())
			h, err := remote.Hash(path)
			if err != nil {
				return err
			}
			infos = append(infos, copyInfo{
				root: remote.Root(),
				path: path,
				hash: h,
			})
			continue
		}
		// not a URL
		subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true, imageSource)
		if err != nil {
			return err
		}
		infos = append(infos, subInfos...)
	}

	if len(infos) == 0 {
		return errors.New("No source files were specified")
	}
	if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one info then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	if len(infos) == 1 {
		info := infos[0]
		srcHash = info.hash
	} else {
		var hashs []string
		var origs []string
		for _, info := range infos {
			origs = append(origs, info.path)
			hashs = append(hashs, info.hash)
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
	}

	// TODO: should this have been using origPaths instead of srcHash in the comment?
	runConfigWithCommentCmd := copyRunConfig(
		req.state.runConfig,
		withCmdCommentString(fmt.Sprintf("%s %s in %s ", cmdName, srcHash, dest)))
	if hit, err := b.probeCache(req.state, runConfigWithCommentCmd); err != nil || hit {
		return err
	}

	container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{
		Config: runConfigWithCommentCmd,
		// Set a log config to override any default value set on the daemon
		HostConfig: &container.HostConfig{LogConfig: defaultLogConfig},
	})
	if err != nil {
		return err
	}
	b.tmpContainers[container.ID] = struct{}{}

	// Twiddle the destination when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if dest, err = normaliseDest(cmdName, req.state.runConfig.WorkingDir, dest); err != nil {
		return err
	}

	for _, info := range infos {
		if err := b.docker.CopyOnBuild(container.ID, dest, info.root, info.path, info.decompress); err != nil {
			return err
		}
	}

	return b.commitContainer(req.state, container.ID, runConfigWithCommentCmd)
}

type runConfigModifier func(*container.Config)

func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config {
	copy := *runConfig
	for _, modifier := range modifiers {
		modifier(&copy)
	}
	return &copy
}
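
// Illustrative sketch, not part of the original source: modifiers are applied
// left to right to a shallow copy of runConfig, so the caller's config is never
// mutated. commit() above builds its cache-probe config this way:
//
//	runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment))
//
// and further modifiers such as withCmd or withEnv can be passed in the same
// call when an instruction needs to override several fields at once.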

func withCmd(cmd []string) runConfigModifier {
	return func(runConfig *container.Config) {
		runConfig.Cmd = cmd
	}
}

// withCmdComment sets Cmd to a nop comment string. See withCmdCommentString for
// why there are two almost identical versions of this.
func withCmdComment(comment string) runConfigModifier {
	return func(runConfig *container.Config) {
		runConfig.Cmd = append(getShell(runConfig), "#(nop) ", comment)
	}
}

// withCmdCommentString exists to maintain compatibility with older versions.
// A few instructions (workdir, copy, add) used a nop comment that is a single arg,
// whereas all the other instructions used a two-arg comment string. This
// function implements the single-arg version.
func withCmdCommentString(comment string) runConfigModifier {
	return func(runConfig *container.Config) {
		runConfig.Cmd = append(getShell(runConfig), "#(nop) "+comment)
	}
}
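
// Illustrative sketch, not part of the original source, assuming the default
// Linux shell of ["/bin/sh", "-c"]: the two helpers above emit differently
// shaped nop commands, which keeps cache keys from older builds valid:
//
//	withCmdComment("MAINTAINER foo")      // Cmd = ["/bin/sh", "-c", "#(nop) ", "MAINTAINER foo"]
//	withCmdCommentString("WORKDIR /app")  // Cmd = ["/bin/sh", "-c", "#(nop) WORKDIR /app"]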

func withEnv(env []string) runConfigModifier {
	return func(runConfig *container.Config) {
		runConfig.Env = env
	}
}

// withEntrypointOverride sets an entrypoint on runConfig if the command is
// not empty. The entrypoint is left unmodified if the command is empty.
//
// The dockerfile RUN instruction expects to run without an entrypoint,
// so the runConfig entrypoint needs to be modified accordingly. ContainerCreate
// will change a []string{""} entrypoint to nil, so we probe the cache with the
// nil entrypoint.
func withEntrypointOverride(cmd []string, entrypoint []string) runConfigModifier {
	return func(runConfig *container.Config) {
		if len(cmd) > 0 {
			runConfig.Entrypoint = entrypoint
		}
	}
}

// getShell is a helper function which gets the right shell for prefixing the
// shell-form of RUN, ENTRYPOINT and CMD instructions
func getShell(c *container.Config) []string {
	if len(c.Shell) == 0 {
		return append([]string{}, defaultShell[:]...)
	}
	return append([]string{}, c.Shell[:]...)
}
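
// Illustrative sketch, not part of the original source: getShell copies the
// shell into a fresh slice, so appending to the result never aliases the
// config's own Shell field. With no SHELL instruction in effect it falls back
// to the package-level defaultShell (on Linux conventionally ["/bin/sh", "-c"]):
//
//	shell := getShell(runConfig)            // e.g. ["/bin/sh", "-c"]
//	cmd := append(shell, "#(nop) ", "...")  // safe: runConfig.Shell is untouched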

func (b *Builder) download(srcURL string) (remote builder.Source, p string, err error) {
	// get filename from URL
	u, err := url.Parse(srcURL)
	if err != nil {
		return
	}
	path := filepath.FromSlash(u.Path) // Ensure in platform semantics
	if strings.HasSuffix(path, string(os.PathSeparator)) {
		path = path[:len(path)-1]
	}
	parts := strings.Split(path, string(os.PathSeparator))
	filename := parts[len(parts)-1]
	if filename == "" {
		err = fmt.Errorf("cannot determine filename from url: %s", u)
		return
	}

	// Initiate the download
	resp, err := httputils.Download(srcURL)
	if err != nil {
		return
	}

	// Prepare file in a tmp dir
	tmpDir, err := ioutils.TempDir("", "docker-remote")
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			os.RemoveAll(tmpDir)
		}
	}()
	tmpFileName := filepath.Join(tmpDir, filename)
	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		return
	}

	progressOutput := streamformatter.NewJSONProgressOutput(b.Output, true)
	progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")
	// Download and dump result to tmp file
	// TODO: add filehash directly
	if _, err = io.Copy(tmpFile, progressReader); err != nil {
		tmpFile.Close()
		return
	}
	fmt.Fprintln(b.Stdout)

	// Set the mtime to the Last-Modified header value if present
	// Otherwise just remove atime and mtime
	mTime := time.Time{}
	lastMod := resp.Header.Get("Last-Modified")
	if lastMod != "" {
		// If we can't parse it then just let it default to 'zero'
		// otherwise use the parsed time value
		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
			mTime = parsedMTime
		}
	}

	tmpFile.Close()

	if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
		return
	}

	lc, err := remotecontext.NewLazyContext(tmpDir)
	if err != nil {
		return
	}

	return lc, filename, nil
}
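
// Illustrative sketch, not part of the original source: the only caller in this
// file is runContextCommand, which treats the download as a throwaway build
// context rooted in the temp dir and is responsible for removing it:
//
//	remote, path, err := b.download(orig)
//	if err != nil {
//		return err
//	}
//	defer os.RemoveAll(remote.Root())
//	h, err := remote.Hash(path)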

var windowsBlacklist = map[string]bool{
	"c:\\":        true,
	"c:\\windows": true,
}

func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool, imageSource *imageMount) ([]copyInfo, error) {
	// Work in daemon-specific OS filepath semantics
	origPath = filepath.FromSlash(origPath)

	// validate windows paths from other images
	if imageSource != nil && runtime.GOOS == "windows" {
		p := strings.ToLower(filepath.Clean(origPath))
		if !filepath.IsAbs(p) {
			if filepath.VolumeName(p) != "" {
				if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths
					p = p[:len(p)-1]
				}
				p += "\\"
			} else {
				p = filepath.Join("c:\\", p)
			}
		}
		if _, blacklisted := windowsBlacklist[p]; blacklisted {
			return nil, errors.New("copy from c:\\ or c:\\windows is not allowed on windows")
		}
	}

	if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	source := b.source
	var err error
	if imageSource != nil {
		source, err = imageSource.context()
		if err != nil {
			return nil, errors.Wrapf(err, "failed to copy")
		}
	}

	if source == nil {
		return nil, errors.Errorf("No context given. Impossible to use %s", cmdName)
	}

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath) {
		var copyInfos []copyInfo
		if err := filepath.Walk(source.Root(), func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			rel, err := remotecontext.Rel(source.Root(), path)
			if err != nil {
				return err
			}
			if rel == "." {
				return nil
			}
			if match, _ := filepath.Match(origPath, rel); !match {
				return nil
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			subInfos, err := b.calcCopyInfo(cmdName, rel, allowLocalDecompression, false, imageSource)
			if err != nil {
				return err
			}
			copyInfos = append(copyInfos, subInfos...)
			return nil
		}); err != nil {
			return nil, err
		}
		return copyInfos, nil
	}

	// Must be a dir or a file
	hash, err := source.Hash(origPath)
	if err != nil {
		return nil, err
	}

	fi, err := remotecontext.StatAt(source, origPath)
	if err != nil {
		return nil, err
	}

	// TODO: remove, handle dirs in Hash()
	copyInfos := []copyInfo{{root: source.Root(), path: origPath, hash: hash, decompress: allowLocalDecompression}}

	if imageSource != nil {
		// fast-cache based on imageID
		if h, ok := b.imageContexts.getCache(imageSource.id, origPath); ok {
			copyInfos[0].hash = h.(string)
			return copyInfos, nil
		}
	}

	// Deal with the single file case
	if !fi.IsDir() {
		copyInfos[0].hash = "file:" + copyInfos[0].hash
		return copyInfos, nil
	}

	fp, err := remotecontext.FullPath(source, origPath)
	if err != nil {
		return nil, err
	}

	// Must be a dir
	var subfiles []string
	err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := remotecontext.Rel(source.Root(), path)
		if err != nil {
			return err
		}
		if rel == "." {
			return nil
		}
		hash, err := source.Hash(rel)
		if err != nil {
			return nil
		}
		// we already checked handleHash above
		subfiles = append(subfiles, hash)
		return nil
	})
	if err != nil {
		return nil, err
	}

	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	copyInfos[0].hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	if imageSource != nil {
		b.imageContexts.setCache(imageSource.id, origPath, copyInfos[0].hash)
	}

	return copyInfos, nil
}
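
// Illustrative note, not part of the original source: the hash computed above
// feeds the ADD/COPY cache key. Single files get a "file:" prefix, directories
// are hashed as the sorted, comma-joined hashes of their contents with a "dir:"
// prefix, and runContextCommand folds multiple sources into one "multi:"-prefixed
// sha256 over the per-source hashes before probing the cache.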

// probeCache checks if a cache match can be found for the current build instruction.
// If an image is found, probeCache returns `(true, nil)`.
// If no image is found, it returns `(false, nil)`.
// If there is any error, it returns `(false, err)`.
func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) {
	c := b.imageCache
	if c == nil || b.options.NoCache || b.cacheBusted {
		return false, nil
	}
	cache, err := c.GetCache(dispatchState.imageID, runConfig)
	if err != nil {
		return false, err
	}
	if len(cache) == 0 {
		logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd)
		b.cacheBusted = true
		return false, nil
	}

	fmt.Fprint(b.Stdout, " ---> Using cache\n")
	logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd)
	dispatchState.imageID = string(cache)
	b.imageContexts.update(dispatchState.imageID, runConfig)
	return true, nil
}
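
// Illustrative sketch, not part of the original source: callers collapse the
// (hit, err) pair into a single early return, relying on err being nil on a
// cache hit. Both commit() and runContextCommand follow this pattern:
//
//	if hit, err := b.probeCache(req.state, runConfigWithCommentCmd); err != nil || hit {
//		return err
//	}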

func (b *Builder) create(runConfig *container.Config) (string, error) {
	resources := container.Resources{
		CgroupParent: b.options.CgroupParent,
		CPUShares:    b.options.CPUShares,
		CPUPeriod:    b.options.CPUPeriod,
		CPUQuota:     b.options.CPUQuota,
		CpusetCpus:   b.options.CPUSetCPUs,
		CpusetMems:   b.options.CPUSetMems,
		Memory:       b.options.Memory,
		MemorySwap:   b.options.MemorySwap,
		Ulimits:      b.options.Ulimits,
	}

	// TODO: why not embed a hostconfig in builder?
	hostConfig := &container.HostConfig{
		SecurityOpt: b.options.SecurityOpt,
		Isolation:   b.options.Isolation,
		ShmSize:     b.options.ShmSize,
		Resources:   resources,
		NetworkMode: container.NetworkMode(b.options.NetworkMode),
		// Set a log config to override any default value set on the daemon
		LogConfig:  defaultLogConfig,
		ExtraHosts: b.options.ExtraHosts,
	}

	// Create the container
	c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{
		Config:     runConfig,
		HostConfig: hostConfig,
	})
	if err != nil {
		return "", err
	}
	for _, warning := range c.Warnings {
		fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning)
	}

	b.tmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID))
	return c.ID, nil
}

var errCancelled = errors.New("build cancelled")

func (b *Builder) run(cID string, cmd []string) (err error) {
	attached := make(chan struct{})
	errCh := make(chan error)
	go func() {
		errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true, attached)
	}()

	select {
	case err := <-errCh:
		return err
	case <-attached:
	}

	finished := make(chan struct{})
	cancelErrCh := make(chan error, 1)
	go func() {
		select {
		case <-b.clientCtx.Done():
			logrus.Debugln("Build cancelled, killing and removing container:", cID)
			b.docker.ContainerKill(cID, 0)
			b.removeContainer(cID)
			cancelErrCh <- errCancelled
		case <-finished:
			cancelErrCh <- nil
		}
	}()

	if err := b.docker.ContainerStart(cID, nil, "", ""); err != nil {
		close(finished)
		if cancelErr := <-cancelErrCh; cancelErr != nil {
			logrus.Debugf("Build cancelled (%v) and got an error from ContainerStart: %v",
				cancelErr, err)
		}
		return err
	}

	// Block on reading output from container, stop on err or chan closed
	if err := <-errCh; err != nil {
		close(finished)
		if cancelErr := <-cancelErrCh; cancelErr != nil {
			logrus.Debugf("Build cancelled (%v) and got an error from errCh: %v",
				cancelErr, err)
		}
		return err
	}

	if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 {
		close(finished)
		if cancelErr := <-cancelErrCh; cancelErr != nil {
			logrus.Debugf("Build cancelled (%v) and got a non-zero code from ContainerWait: %d",
				cancelErr, ret)
		}
		// TODO: change error type, because jsonmessage.JSONError assumes HTTP
		return &jsonmessage.JSONError{
			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(cmd, " "), ret),
			Code:    ret,
		}
	}

	close(finished)
	return <-cancelErrCh
}
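
// Illustrative note, not part of the original source: cancelErrCh is buffered
// with capacity 1 so the watcher goroutine can send its result without waiting
// for run to reach its read; closing finished on every exit path is what
// releases the watcher when the container is not being cancelled.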

func (b *Builder) removeContainer(c string) error {
	rmConfig := &types.ContainerRmConfig{
		ForceRemove:  true,
		RemoveVolume: true,
	}
	if err := b.docker.ContainerRm(c, rmConfig); err != nil {
		fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
		return err
	}
	return nil
}

func (b *Builder) clearTmp() {
	for c := range b.tmpContainers {
		if err := b.removeContainer(c); err != nil {
			return
		}
		delete(b.tmpContainers, c)
		fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c))
	}
}