internals.go

package builder

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"syscall"
	"time"

	"github.com/docker/docker/archive"
	"github.com/docker/docker/daemon"
	imagepkg "github.com/docker/docker/image"
	"github.com/docker/docker/pkg/log"
	"github.com/docker/docker/pkg/parsers"
	"github.com/docker/docker/pkg/symlink"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/tarsum"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/utils"
)
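// readContext decompresses the build context stream, wraps it in a tarsum so
// per-file checksums can be looked up later, and unpacks it into a temporary
// directory recorded in b.contextPath.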
func (b *Builder) readContext(context io.Reader) error {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		return err
	}

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return err
	}

	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
		return err
	}

	if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
		return err
	}

	b.contextPath = tmpdirPath
	return nil
}
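// commit commits the container identified by id as a new image and advances
// b.image to the result. When id is empty, a temporary "#(nop)" container is
// created for the commit (or a cached image is reused via probeCache).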
func (b *Builder) commit(id string, autoCmd []string, comment string) error {
	if b.image == "" {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.Config.Image = b.image
	if id == "" {
		cmd := b.Config.Cmd
		b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
		defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		if hit {
			return nil
		}

		container, err := b.create()
		if err != nil {
			return err
		}
		id = container.ID

		if err := container.Mount(); err != nil {
			return err
		}
		defer container.Unmount()
	}

	container := b.Daemon.Get(id)
	if container == nil {
		return fmt.Errorf("An error occurred while creating the container")
	}

	// Note: Actually copy the struct
	autoConfig := *b.Config
	autoConfig.Cmd = autoCmd

	// Commit the container
	image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
	if err != nil {
		return err
	}
	b.image = image.ID
	return nil
}
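// copyInfo carries the per-source bookkeeping for runContextCommand: origin
// and destination paths, the hash used for cache lookups, whether the source
// should be decompressed, and any temporary download directory to clean up.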
type copyInfo struct {
	origPath   string
	destPath   string
	hashPath   string
	decompress bool
	tmpDir     string
}
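// runContextCommand implements context-dependent instructions such as ADD and
// COPY: it resolves every source argument, probes the build cache with the
// computed hashes, and, on a cache miss, copies the sources into a temporary
// container that is then committed.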
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	dest := args[len(args)-1] // last one is always the dest
	if len(args) > 2 && dest[len(dest)-1] != '/' {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	copyInfos := make([]copyInfo, len(args)-1)

	hasHash := false
	srcPaths := ""
	origPaths := ""

	b.Config.Image = b.image

	defer func() {
		for _, ci := range copyInfos {
			if ci.tmpDir != "" {
				os.RemoveAll(ci.tmpDir)
			}
		}
	}()

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	for i, orig := range args[0 : len(args)-1] {
		ci := &copyInfos[i]
		ci.origPath = orig
		ci.destPath = dest
		ci.decompress = true

		err := calcCopyInfo(b, cmdName, ci, allowRemote, allowDecompression)
		if err != nil {
			return err
		}

		origPaths += " " + ci.origPath // will have leading space
		if ci.hashPath == "" {
			srcPaths += " " + ci.origPath // note leading space
		} else {
			srcPaths += " " + ci.hashPath // note leading space
			hasHash = true
		}
	}

	cmd := b.Config.Cmd
	b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s%s in %s", cmdName, srcPaths, dest)}
	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	// If we do not have at least one hash, never use the cache
	if hit && hasHash {
		return nil
	}

	container, _, err := b.Daemon.Create(b.Config, "")
	if err != nil {
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	for _, ci := range copyInfos {
		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
			return err
		}
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s%s in %s", cmdName, origPaths, dest)); err != nil {
		return err
	}
	return nil
}
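// calcCopyInfo fills in the remaining fields of ci for a single source: it
// downloads remote URLs into a temporary directory inside the context,
// validates the path, and, when caching is enabled, computes the hash used to
// probe the build cache (a remote tarsum, a per-file sum, or an aggregated
// "dir:" sum for directories).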
func calcCopyInfo(b *Builder, cmdName string, ci *copyInfo, allowRemote bool, allowDecompression bool) error {
	var (
		remoteHash string
		isRemote   bool
	)

	saveOrig := ci.origPath
	isRemote = utils.IsURL(ci.origPath)

	if isRemote && !allowRemote {
		return fmt.Errorf("Source can't be a URL for %s", cmdName)
	} else if isRemote {
		// Initiate the download
		resp, err := utils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Zero out the mtime of the newly created tmp file
		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
			return err
		}

		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// Process the checksum
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		remoteHash = tarSum.Sum(nil)
		r.Close()

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(saveOrig)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}
	}

	if err := b.checkPathForAddition(ci.origPath); err != nil {
		return err
	}

	// Hash path and check the cache
	if b.UtilizeCache {
		var (
			sums = b.context.GetSums()
		)

		if remoteHash != "" {
			ci.hashPath = remoteHash
		} else if fi, err := os.Stat(path.Join(b.contextPath, ci.origPath)); err != nil {
			return err
		} else if fi.IsDir() {
			var subfiles []string
			for _, fileInfo := range sums {
				absFile := path.Join(b.contextPath, fileInfo.Name())
				absOrigPath := path.Join(b.contextPath, ci.origPath)
				if strings.HasPrefix(absFile, absOrigPath) {
					subfiles = append(subfiles, fileInfo.Sum())
				}
			}
			sort.Strings(subfiles)
			hasher := sha256.New()
			hasher.Write([]byte(strings.Join(subfiles, ",")))
			ci.hashPath = "dir:" + hex.EncodeToString(hasher.Sum(nil))
		} else {
			if ci.origPath[0] == '/' && len(ci.origPath) > 1 {
				ci.origPath = ci.origPath[1:]
			}
			ci.origPath = strings.TrimPrefix(ci.origPath, "./")
			// This will match on the first file in sums of the archive
			if fis := sums.GetFile(ci.origPath); fis != nil {
				ci.hashPath = "file:" + fis.Sum()
			}
		}
	}

	if !allowDecompression || isRemote {
		ci.decompress = false
	}
	return nil
}
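// pullImage pulls the named image (defaulting to the "latest" tag), resolving
// registry credentials from the builder's auth configuration, and returns the
// resulting image from the daemon's repositories.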
func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}

	pullRegistryAuth := b.AuthConfig
	if len(b.AuthConfigFile.Configs) > 0 {
		// The request came with a full auth config file; we prefer to use that
		endpoint, _, err := registry.ResolveRepositoryName(remote)
		if err != nil {
			return nil, err
		}
		resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(endpoint)
		pullRegistryAuth = &resolvedAuth
	}

	job := b.Engine.Job("pull", remote, tag)
	job.SetenvBool("json", b.StreamFormatter.Json())
	job.SetenvBool("parallel", true)
	job.SetenvJson("authConfig", pullRegistryAuth)
	job.Stdout.Add(b.OutOld)

	if err := job.Run(); err != nil {
		return nil, err
	}

	image, err := b.Daemon.Repositories().LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}
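// processImageFrom makes img the builder's base image, adopts its config, and
// replays any ONBUILD triggers stored in that config, rejecting trigger
// instructions (ONBUILD, FROM, MAINTAINER) that are not allowed to chain.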
func (b *Builder) processImageFrom(img *imagepkg.Image) error {
	b.image = img.ID
	if img.Config != nil {
		b.Config = img.Config
	}

	if len(b.Config.Env) == 0 {
		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.Config.OnBuild
	b.Config.OnBuild = []string{}

	// FIXME rewrite this so that builder/parser is used; right now steps in
	// onbuild are muted because we have no good way to represent the step
	// number
	for _, step := range onBuildTriggers {
		splitStep := strings.Split(step, " ")
		stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " "))
		switch stepInstruction {
		case "ONBUILD":
			return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step)
		case "MAINTAINER", "FROM":
			return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step)
		}

		// FIXME we have to run the evaluator manually here. This does not belong
		// in this function. Once removed, the init() in evaluator.go should no
		// longer be necessary.
		if f, ok := evaluateTable[strings.ToLower(stepInstruction)]; ok {
			if err := f(b, splitStep[1:], nil); err != nil {
				return err
			}
		} else {
			return fmt.Errorf("%s doesn't appear to be a valid Dockerfile instruction", splitStep[0])
		}
	}

	return nil
}
// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
// and if so attempts to look up the current `b.image` and `b.Config` pair
// in the current server `b.Daemon`. If an image is found, probeCache returns
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
// is any error, it returns `(false, err)`.
func (b *Builder) probeCache() (bool, error) {
	if b.UtilizeCache {
		if cache, err := b.Daemon.ImageGetCached(b.image, b.Config); err != nil {
			return false, err
		} else if cache != nil {
			fmt.Fprintf(b.OutStream, " ---> Using cache\n")
			log.Debugf("[BUILDER] Use cached version")
			b.image = cache.ID
			return true, nil
		} else {
			log.Debugf("[BUILDER] Cache miss")
		}
	}
	return false, nil
}
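// create builds a temporary container from the current b.Config, records it
// in b.TmpContainers, and overrides the entrypoint inherited from the base
// image with the configured Cmd.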
func (b *Builder) create() (*daemon.Container, error) {
	if b.image == "" {
		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.Config.Image = b.image

	config := *b.Config

	// Create the container
	c, warnings, err := b.Daemon.Create(b.Config, "")
	if err != nil {
		return nil, err
	}
	for _, warning := range warnings {
		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
	}

	b.TmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))

	// override the entry point that may have been picked up from the base image
	c.Path = config.Cmd[0]
	c.Args = config.Cmd[1:]

	return c, nil
}
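// run starts the given container, optionally streaming its output when the
// build is verbose, waits for it to stop, and turns a non-zero exit code into
// an error.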
func (b *Builder) run(c *daemon.Container) error {
	var errCh chan error

	if b.Verbose {
		errCh = utils.Go(func() error {
			// FIXME: call the 'attach' job so that daemon.Attach can be made private
			//
			// FIXME (LK4D4): it may also make sense to call the 'logs' job, which is like
			// attach but without hijacking stdin. With attach there can also be a race
			// condition, because some output may already have been printed before we attach.
			return <-b.Daemon.Attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, nil, b.OutStream, b.ErrStream)
		})
	}

	// start the container
	if err := c.Start(); err != nil {
		return err
	}

	if errCh != nil {
		if err := <-errCh; err != nil {
			return err
		}
	}

	// Wait for it to finish
	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
		err := &utils.JSONError{
			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
			Code:    ret,
		}
		return err
	}

	return nil
}
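// checkPathForAddition verifies that orig resolves (through any symlinks) to
// an existing path inside the build context; anything else is rejected.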
func (b *Builder) checkPathForAddition(orig string) error {
	origPath := path.Join(b.contextPath, orig)
	origPath, err := filepath.EvalSymlinks(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	if !strings.HasPrefix(origPath, b.contextPath) {
		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
	}
	if _, err := os.Stat(origPath); err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	return nil
}
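// addContext copies orig from the build context into the container's root
// filesystem at dest. Directories are copied recursively, archives are
// unpacked when decompress is true, and ownership of the result is reset to
// root via fixPermissions.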
func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = path.Join(b.contextPath, orig)
		destPath   = path.Join(container.RootfsPath(), dest)
	)

	if destPath != container.RootfsPath() {
		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
		if err != nil {
			return err
		}
	}

	// Preserve the trailing '/'
	if strings.HasSuffix(dest, "/") || dest == "." {
		destPath = destPath + "/"
	}

	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		destExists = false
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}

	if fi.IsDir() {
		return copyAsDirectory(origPath, destPath, destExists)
	}

	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive. To support the untar
		// feature we need to clean up the path a little bit, because tar is very
		// forgiving. First we need to strip off the archive's filename from the
		// path, but this is only added if it does not end in / .
		tarDest := destPath
		if strings.HasSuffix(tarDest, "/") {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig
		if err := archive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}

	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := archive.CopyWithTar(origPath, destPath); err != nil {
		return err
	}

	resPath := destPath
	if destExists && destStat.IsDir() {
		resPath = path.Join(destPath, path.Base(origPath))
	}

	return fixPermissions(resPath, 0, 0)
}
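// copyAsDirectory copies a source directory into destination and then resets
// ownership to root: on each copied entry when the destination already
// existed, otherwise on the destination tree as a whole.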
func copyAsDirectory(source, destination string, destinationExists bool) error {
	if err := archive.CopyWithTar(source, destination); err != nil {
		return err
	}

	if destinationExists {
		files, err := ioutil.ReadDir(source)
		if err != nil {
			return err
		}

		for _, file := range files {
			if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil {
				return err
			}
		}
		return nil
	}

	return fixPermissions(destination, 0, 0)
}
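// fixPermissions recursively chowns destination to the given uid/gid (without
// following symlinks), ignoring paths that disappear while walking.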
func fixPermissions(destination string, uid, gid int) error {
	return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error {
		if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) {
			return err
		}
		return nil
	})
}
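// clearTmp removes the intermediate containers created during the build and
// reports each removal (or failure) on the output stream.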
func (b *Builder) clearTmp() {
	for c := range b.TmpContainers {
		tmp := b.Daemon.Get(c)
		if err := b.Daemon.Destroy(tmp); err != nil {
			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
		} else {
			delete(b.TmpContainers, c)
			fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
		}
	}
}