internals.go 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687
  1. package builder
  2. // internals for handling commands. Covers many areas and a lot of
  3. // non-contiguous functionality. Please read the comments.
  4. import (
  5. "crypto/sha256"
  6. "encoding/hex"
  7. "fmt"
  8. "io"
  9. "io/ioutil"
  10. "net/url"
  11. "os"
  12. "path"
  13. "path/filepath"
  14. "sort"
  15. "strings"
  16. "syscall"
  17. "time"
  18. "github.com/docker/docker/builder/parser"
  19. "github.com/docker/docker/daemon"
  20. imagepkg "github.com/docker/docker/image"
  21. "github.com/docker/docker/pkg/archive"
  22. "github.com/docker/docker/pkg/log"
  23. "github.com/docker/docker/pkg/parsers"
  24. "github.com/docker/docker/pkg/promise"
  25. "github.com/docker/docker/pkg/symlink"
  26. "github.com/docker/docker/pkg/system"
  27. "github.com/docker/docker/pkg/tarsum"
  28. "github.com/docker/docker/registry"
  29. "github.com/docker/docker/utils"
  30. )
  31. func (b *Builder) readContext(context io.Reader) error {
  32. tmpdirPath, err := ioutil.TempDir("", "docker-build")
  33. if err != nil {
  34. return err
  35. }
  36. decompressedStream, err := archive.DecompressStream(context)
  37. if err != nil {
  38. return err
  39. }
  40. if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
  41. return err
  42. }
  43. if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
  44. return err
  45. }
  46. b.contextPath = tmpdirPath
  47. return nil
  48. }
// commit records the current b.Config (with Cmd temporarily replaced by
// autoCmd on the committed copy) as a new image and advances b.image to
// the resulting image ID.
//
// If id is empty, a no-op "#(nop)" container is created purely to have
// something to commit — or the step is satisfied from the build cache,
// in which case probeCache has already advanced b.image and we return
// early. Otherwise the existing container with that id is committed.
func (b *Builder) commit(id string, autoCmd []string, comment string) error {
	if b.image == "" {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.Config.Image = b.image
	if id == "" {
		cmd := b.Config.Cmd
		// The "#(nop)" Cmd makes the step visible in image history without
		// running anything; the real Cmd is restored by the defer below.
		b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
		defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		if hit {
			// Cache hit: b.image already points at the cached image.
			return nil
		}

		container, err := b.create()
		if err != nil {
			return err
		}
		id = container.ID

		if err := container.Mount(); err != nil {
			return err
		}
		defer container.Unmount()
	}

	container := b.Daemon.Get(id)
	if container == nil {
		return fmt.Errorf("An error occured while creating the container")
	}

	// Note: Actually copy the struct so autoCmd can be set on the
	// committed config without mutating b.Config.
	autoConfig := *b.Config
	autoConfig.Cmd = autoCmd
	// Commit the container
	image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
	if err != nil {
		return err
	}
	b.image = image.ID
	return nil
}
// copyInfo describes one resolved source of an ADD/COPY instruction:
// where it comes from, where it goes, and the hash used as its
// build-cache key.
type copyInfo struct {
	origPath   string // source path, relative to the build context
	destPath   string // destination path inside the container
	hash       string // cache key: "file:<sum>", "dir:<sum>", or the raw path/URL fallback
	decompress bool   // whether an archive source should be unpacked at the destination
	tmpDir     string // temp dir holding a downloaded remote file; removed by the caller
}
  97. func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
  98. if b.context == nil {
  99. return fmt.Errorf("No context given. Impossible to use %s", cmdName)
  100. }
  101. if len(args) < 2 {
  102. return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
  103. }
  104. dest := args[len(args)-1] // last one is always the dest
  105. copyInfos := []*copyInfo{}
  106. b.Config.Image = b.image
  107. defer func() {
  108. for _, ci := range copyInfos {
  109. if ci.tmpDir != "" {
  110. os.RemoveAll(ci.tmpDir)
  111. }
  112. }
  113. }()
  114. // Loop through each src file and calculate the info we need to
  115. // do the copy (e.g. hash value if cached). Don't actually do
  116. // the copy until we've looked at all src files
  117. for _, orig := range args[0 : len(args)-1] {
  118. err := calcCopyInfo(b, cmdName, &copyInfos, orig, dest, allowRemote, allowDecompression)
  119. if err != nil {
  120. return err
  121. }
  122. }
  123. if len(copyInfos) == 0 {
  124. return fmt.Errorf("No source files were specified")
  125. }
  126. if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") {
  127. return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
  128. }
  129. // For backwards compat, if there's just one CI then use it as the
  130. // cache look-up string, otherwise hash 'em all into one
  131. var srcHash string
  132. var origPaths string
  133. if len(copyInfos) == 1 {
  134. srcHash = copyInfos[0].hash
  135. origPaths = copyInfos[0].origPath
  136. } else {
  137. var hashs []string
  138. var origs []string
  139. for _, ci := range copyInfos {
  140. hashs = append(hashs, ci.hash)
  141. origs = append(origs, ci.origPath)
  142. }
  143. hasher := sha256.New()
  144. hasher.Write([]byte(strings.Join(hashs, ",")))
  145. srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
  146. origPaths = strings.Join(origs, " ")
  147. }
  148. cmd := b.Config.Cmd
  149. b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)}
  150. defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
  151. hit, err := b.probeCache()
  152. if err != nil {
  153. return err
  154. }
  155. // If we do not have at least one hash, never use the cache
  156. if hit && b.UtilizeCache {
  157. return nil
  158. }
  159. container, _, err := b.Daemon.Create(b.Config, nil, "")
  160. if err != nil {
  161. return err
  162. }
  163. b.TmpContainers[container.ID] = struct{}{}
  164. if err := container.Mount(); err != nil {
  165. return err
  166. }
  167. defer container.Unmount()
  168. for _, ci := range copyInfos {
  169. if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
  170. return err
  171. }
  172. }
  173. if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
  174. return err
  175. }
  176. return nil
  177. }
  178. func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
  179. if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
  180. origPath = origPath[1:]
  181. }
  182. origPath = strings.TrimPrefix(origPath, "./")
  183. // In the remote/URL case, download it and gen its hashcode
  184. if utils.IsURL(origPath) {
  185. if !allowRemote {
  186. return fmt.Errorf("Source can't be a URL for %s", cmdName)
  187. }
  188. ci := copyInfo{}
  189. ci.origPath = origPath
  190. ci.hash = origPath // default to this but can change
  191. ci.destPath = destPath
  192. ci.decompress = false
  193. *cInfos = append(*cInfos, &ci)
  194. // Initiate the download
  195. resp, err := utils.Download(ci.origPath)
  196. if err != nil {
  197. return err
  198. }
  199. // Create a tmp dir
  200. tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
  201. if err != nil {
  202. return err
  203. }
  204. ci.tmpDir = tmpDirName
  205. // Create a tmp file within our tmp dir
  206. tmpFileName := path.Join(tmpDirName, "tmp")
  207. tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
  208. if err != nil {
  209. return err
  210. }
  211. // Download and dump result to tmp file
  212. if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
  213. tmpFile.Close()
  214. return err
  215. }
  216. fmt.Fprintf(b.OutStream, "\n")
  217. tmpFile.Close()
  218. // Remove the mtime of the newly created tmp file
  219. if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
  220. return err
  221. }
  222. ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
  223. // If the destination is a directory, figure out the filename.
  224. if strings.HasSuffix(ci.destPath, "/") {
  225. u, err := url.Parse(origPath)
  226. if err != nil {
  227. return err
  228. }
  229. path := u.Path
  230. if strings.HasSuffix(path, "/") {
  231. path = path[:len(path)-1]
  232. }
  233. parts := strings.Split(path, "/")
  234. filename := parts[len(parts)-1]
  235. if filename == "" {
  236. return fmt.Errorf("cannot determine filename from url: %s", u)
  237. }
  238. ci.destPath = ci.destPath + filename
  239. }
  240. // Calc the checksum, only if we're using the cache
  241. if b.UtilizeCache {
  242. r, err := archive.Tar(tmpFileName, archive.Uncompressed)
  243. if err != nil {
  244. return err
  245. }
  246. tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
  247. if err != nil {
  248. return err
  249. }
  250. if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
  251. return err
  252. }
  253. ci.hash = tarSum.Sum(nil)
  254. r.Close()
  255. }
  256. return nil
  257. }
  258. // Deal with wildcards
  259. if ContainsWildcards(origPath) {
  260. for _, fileInfo := range b.context.GetSums() {
  261. if fileInfo.Name() == "" {
  262. continue
  263. }
  264. match, _ := path.Match(origPath, fileInfo.Name())
  265. if !match {
  266. continue
  267. }
  268. calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression)
  269. }
  270. return nil
  271. }
  272. // Must be a dir or a file
  273. if err := b.checkPathForAddition(origPath); err != nil {
  274. return err
  275. }
  276. fi, _ := os.Stat(path.Join(b.contextPath, origPath))
  277. ci := copyInfo{}
  278. ci.origPath = origPath
  279. ci.hash = origPath
  280. ci.destPath = destPath
  281. ci.decompress = allowDecompression
  282. *cInfos = append(*cInfos, &ci)
  283. // If not using cache don't need to do anything else.
  284. // If we are using a cache then calc the hash for the src file/dir
  285. if !b.UtilizeCache {
  286. return nil
  287. }
  288. // Deal with the single file case
  289. if !fi.IsDir() {
  290. // This will match first file in sums of the archive
  291. fis := b.context.GetSums().GetFile(ci.origPath)
  292. if fis != nil {
  293. ci.hash = "file:" + fis.Sum()
  294. }
  295. return nil
  296. }
  297. // Must be a dir
  298. var subfiles []string
  299. absOrigPath := path.Join(b.contextPath, ci.origPath)
  300. // Add a trailing / to make sure we only pick up nested files under
  301. // the dir and not sibling files of the dir that just happen to
  302. // start with the same chars
  303. if !strings.HasSuffix(absOrigPath, "/") {
  304. absOrigPath += "/"
  305. }
  306. // Need path w/o / too to find matching dir w/o trailing /
  307. absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]
  308. for _, fileInfo := range b.context.GetSums() {
  309. absFile := path.Join(b.contextPath, fileInfo.Name())
  310. if strings.HasPrefix(absFile, absOrigPath) || absFile == absOrigPathNoSlash {
  311. subfiles = append(subfiles, fileInfo.Sum())
  312. }
  313. }
  314. sort.Strings(subfiles)
  315. hasher := sha256.New()
  316. hasher.Write([]byte(strings.Join(subfiles, ",")))
  317. ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
  318. return nil
  319. }
  320. func ContainsWildcards(name string) bool {
  321. for i := 0; i < len(name); i++ {
  322. ch := name[i]
  323. if ch == '\\' {
  324. i++
  325. } else if ch == '*' || ch == '?' || ch == '[' {
  326. return true
  327. }
  328. }
  329. return false
  330. }
  331. func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
  332. remote, tag := parsers.ParseRepositoryTag(name)
  333. if tag == "" {
  334. tag = "latest"
  335. }
  336. pullRegistryAuth := b.AuthConfig
  337. if len(b.AuthConfigFile.Configs) > 0 {
  338. // The request came with a full auth config file, we prefer to use that
  339. endpoint, _, err := registry.ResolveRepositoryName(remote)
  340. if err != nil {
  341. return nil, err
  342. }
  343. resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(endpoint)
  344. pullRegistryAuth = &resolvedAuth
  345. }
  346. job := b.Engine.Job("pull", remote, tag)
  347. job.SetenvBool("json", b.StreamFormatter.Json())
  348. job.SetenvBool("parallel", true)
  349. job.SetenvJson("authConfig", pullRegistryAuth)
  350. job.Stdout.Add(b.OutOld)
  351. if err := job.Run(); err != nil {
  352. return nil, err
  353. }
  354. image, err := b.Daemon.Repositories().LookupImage(name)
  355. if err != nil {
  356. return nil, err
  357. }
  358. return image, nil
  359. }
// processImageFrom makes img the new base of the build: b.image and
// b.Config are taken from it, a default PATH is injected when the image
// carries no environment, and any ONBUILD triggers stored on the image
// are parsed and dispatched as if they were regular Dockerfile steps.
func (b *Builder) processImageFrom(img *imagepkg.Image) error {
	b.image = img.ID
	if img.Config != nil {
		// Adopt the base image's config wholesale; later instructions
		// mutate this shared struct.
		b.Config = img.Config
	}
	if len(b.Config.Env) == 0 {
		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
	}
	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
	}
	// Copy the ONBUILD triggers, and remove them from the config, since the config will be commited.
	onBuildTriggers := b.Config.OnBuild
	b.Config.OnBuild = []string{}
	// parse the ONBUILD triggers by invoking the parser
	for stepN, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}
		for i, n := range ast.Children {
			// Reject instructions that would restart the build or change
			// its lineage from inside a trigger.
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}
			fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)
			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}
	return nil
}
  396. // probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
  397. // and if so attempts to look up the current `b.image` and `b.Config` pair
  398. // in the current server `b.Daemon`. If an image is found, probeCache returns
  399. // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
  400. // is any error, it returns `(false, err)`.
  401. func (b *Builder) probeCache() (bool, error) {
  402. if b.UtilizeCache {
  403. if cache, err := b.Daemon.ImageGetCached(b.image, b.Config); err != nil {
  404. return false, err
  405. } else if cache != nil {
  406. fmt.Fprintf(b.OutStream, " ---> Using cache\n")
  407. log.Debugf("[BUILDER] Use cached version")
  408. b.image = cache.ID
  409. return true, nil
  410. } else {
  411. log.Debugf("[BUILDER] Cache miss")
  412. }
  413. }
  414. return false, nil
  415. }
// create builds a new temporary container from b.Config (a base image
// must have been set via FROM first), registers it in b.TmpContainers,
// and points its entry point at b.Config.Cmd.
func (b *Builder) create() (*daemon.Container, error) {
	if b.image == "" {
		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.Config.Image = b.image
	// Snapshot the config so Cmd can be read back below even if the
	// daemon mutates b.Config during creation.
	config := *b.Config
	// Create the container
	c, warnings, err := b.Daemon.Create(b.Config, nil, "")
	if err != nil {
		return nil, err
	}
	for _, warning := range warnings {
		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
	}
	b.TmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
	// override the entry point that may have been picked up from the base image
	// NOTE(review): this assumes config.Cmd is non-empty and would panic
	// otherwise — confirm all callers set Cmd before calling create().
	c.Path = config.Cmd[0]
	c.Args = config.Cmd[1:]
	return c, nil
}
// run starts container c, optionally streaming its output to
// b.OutStream/b.ErrStream when b.Verbose is set, waits for it to stop,
// and converts a non-zero exit code into a *utils.JSONError.
func (b *Builder) run(c *daemon.Container) error {
	var errCh chan error
	if b.Verbose {
		// Attach in the background; the channel delivers the attach
		// result once the container's streams close.
		errCh = promise.Go(func() error {
			// FIXME: call the 'attach' job so that daemon.Attach can be made private
			//
			// FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach
			// but without hijacking for stdin. Also, with attach there can be race
			// condition because of some output already was printed before it.
			return <-b.Daemon.Attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, nil, b.OutStream, b.ErrStream)
		})
	}
	//start the container
	if err := c.Start(); err != nil {
		return err
	}
	if errCh != nil {
		// Block until the attach stream finishes so no output is lost.
		if err := <-errCh; err != nil {
			return err
		}
	}
	// Wait for it to finish
	// NOTE(review): the negative timeout presumably means "wait
	// indefinitely" — confirm against WaitStop's contract.
	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
		err := &utils.JSONError{
			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
			Code:    ret,
		}
		return err
	}
	return nil
}
  468. func (b *Builder) checkPathForAddition(orig string) error {
  469. origPath := path.Join(b.contextPath, orig)
  470. origPath, err := filepath.EvalSymlinks(origPath)
  471. if err != nil {
  472. if os.IsNotExist(err) {
  473. return fmt.Errorf("%s: no such file or directory", orig)
  474. }
  475. return err
  476. }
  477. if !strings.HasPrefix(origPath, b.contextPath) {
  478. return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
  479. }
  480. if _, err := os.Stat(origPath); err != nil {
  481. if os.IsNotExist(err) {
  482. return fmt.Errorf("%s: no such file or directory", orig)
  483. }
  484. return err
  485. }
  486. return nil
  487. }
// addContext copies orig (relative to the build context) to dest inside
// the container's root filesystem. Symlinks under the rootfs are
// resolved so the write cannot escape it; directories are copied
// recursively; when decompress is set, archive sources are unpacked in
// place; ownership of the result is reset to root:root.
func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = path.Join(b.contextPath, orig)
		destPath   = path.Join(container.RootfsPath(), dest)
	)
	// Resolve symlinks within the rootfs so dest cannot point outside it.
	if destPath != container.RootfsPath() {
		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
		if err != nil {
			return err
		}
	}
	// Preserve the trailing '/'
	if strings.HasSuffix(dest, "/") || dest == "." {
		destPath = destPath + "/"
	}
	// destStat is captured before the copy so the "file into existing
	// directory" case below can be detected afterwards.
	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		destExists = false
	}
	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	if fi.IsDir() {
		return copyAsDirectory(origPath, destPath, destExists)
	}
	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive
		// to support the untar feature we need to clean up the path a little bit
		// because tar is very forgiving. First we need to strip off the archive's
		// filename from the path but this is only added if it does not end in / .
		tarDest := destPath
		if strings.HasSuffix(tarDest, "/") {
			tarDest = filepath.Dir(destPath)
		}
		// try to successfully untar the orig
		if err := archive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			// Not a usable archive: log and fall through to copy the
			// source as a plain file instead.
			log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}
	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := archive.CopyWithTar(origPath, destPath); err != nil {
		return err
	}
	resPath := destPath
	if destExists && destStat.IsDir() {
		// Copying a file into an existing directory places it inside, so
		// fix ownership of the resulting file, not the directory.
		resPath = path.Join(destPath, path.Base(origPath))
	}
	return fixPermissions(resPath, 0, 0)
}
  551. func copyAsDirectory(source, destination string, destinationExists bool) error {
  552. if err := archive.CopyWithTar(source, destination); err != nil {
  553. return err
  554. }
  555. if destinationExists {
  556. files, err := ioutil.ReadDir(source)
  557. if err != nil {
  558. return err
  559. }
  560. for _, file := range files {
  561. if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil {
  562. return err
  563. }
  564. }
  565. return nil
  566. }
  567. return fixPermissions(destination, 0, 0)
  568. }
  569. func fixPermissions(destination string, uid, gid int) error {
  570. return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error {
  571. if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) {
  572. return err
  573. }
  574. return nil
  575. })
  576. }
  577. func (b *Builder) clearTmp() {
  578. for c := range b.TmpContainers {
  579. tmp := b.Daemon.Get(c)
  580. if err := b.Daemon.Destroy(tmp); err != nil {
  581. fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
  582. return
  583. }
  584. b.Daemon.DeleteVolumes(tmp.VolumePaths())
  585. delete(b.TmpContainers, c)
  586. fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
  587. }
  588. }