archive.go

package archive

import (
	"archive/tar"
	"bufio"
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"syscall"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/promise"
	"github.com/docker/docker/pkg/system"
)

type (
	// Archive is an io.ReadCloser, i.e. it combines the Read and Close interfaces.
	Archive io.ReadCloser
	// Reader is a type of io.Reader.
	Reader io.Reader
	// Compression is the state representing whether a stream is compressed or not.
	Compression int
	// TarChownOptions wraps the chown options UID and GID.
	TarChownOptions struct {
		UID, GID int
	}
	// TarOptions wraps the tar options.
	TarOptions struct {
		IncludeFiles     []string
		ExcludePatterns  []string
		Compression      Compression
		NoLchown         bool
		UIDMaps          []idtools.IDMap
		GIDMaps          []idtools.IDMap
		ChownOpts        *TarChownOptions
		IncludeSourceDir bool
		// When unpacking, specifies whether overwriting a directory with a
		// non-directory is allowed and vice versa.
		NoOverwriteDirNonDir bool
		// For each include when creating an archive, the included name will be
		// replaced with the matching name from this map.
		RebaseNames map[string]string
	}

	// Archiver allows the reuse of most utility functions of this package
	// with a pluggable Untar function. Also, to facilitate the passing of
	// specific id mappings for untar, an archiver can be created with maps
	// which will then be passed to Untar operations.
	Archiver struct {
		Untar   func(io.Reader, string, *TarOptions) error
		UIDMaps []idtools.IDMap
		GIDMaps []idtools.IDMap
	}

	// breakoutError is used to differentiate errors related to breaking out.
	// When testing archive breakout in the unit tests, this error is expected
	// in order for the test to pass.
	breakoutError error
)

var (
	// ErrNotImplemented is the error returned when a function is not implemented.
	ErrNotImplemented = errors.New("Function not implemented")

	defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
)

const (
	// Uncompressed represents no compression.
	Uncompressed Compression = iota
	// Bzip2 is the bzip2 compression algorithm.
	Bzip2
	// Gzip is the gzip compression algorithm.
	Gzip
	// Xz is the xz compression algorithm.
	Xz
)

// IsArchive checks if the data is an archive by inspecting its header.
func IsArchive(header []byte) bool {
	compression := DetectCompression(header)
	if compression != Uncompressed {
		return true
	}
	r := tar.NewReader(bytes.NewBuffer(header))
	_, err := r.Next()
	return err == nil
}

// DetectCompression detects the compression algorithm of the source.
func DetectCompression(source []byte) Compression {
	for compression, m := range map[Compression][]byte{
		Bzip2: {0x42, 0x5A, 0x68},
		Gzip:  {0x1F, 0x8B, 0x08},
		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
	} {
		if len(source) < len(m) {
			logrus.Debugf("Len too short")
			continue
		}
		if bytes.Compare(m, source[:len(m)]) == 0 {
			return compression
		}
	}
	return Uncompressed
}
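
// sniffArchive is an illustrative sketch added by the editor (it is not part
// of the original package). It shows how IsArchive and DetectCompression can
// be combined to sniff a file header: 512 bytes covers the bzip2/gzip/xz magic
// numbers as well as a full uncompressed tar header block. The path argument
// and the function itself are hypothetical.
func sniffArchive(path string) (bool, Compression, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, Uncompressed, err
	}
	defer f.Close()

	header := make([]byte, 512)
	n, err := io.ReadFull(f, header)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return false, Uncompressed, err
	}
	return IsArchive(header[:n]), DetectCompression(header[:n]), nil
}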
func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
	args := []string{"xz", "-d", "-c", "-q"}

	return cmdStream(exec.Command(args[0], args[1:]...), archive)
}

// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	p := pools.BufioReader32KPool
	buf := p.Get(archive)
	bs, err := buf.Peek(10)
	if err != nil {
		return nil, err
	}

	compression := DetectCompression(bs)
	switch compression {
	case Uncompressed:
		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
		return readBufWrapper, nil
	case Gzip:
		gzReader, err := gzip.NewReader(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
		return readBufWrapper, nil
	case Bzip2:
		bz2Reader := bzip2.NewReader(buf)
		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
		return readBufWrapper, nil
	case Xz:
		xzReader, chdone, err := xzDecompress(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
		return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
			<-chdone
			return readBufWrapper.Close()
		}), nil
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}
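
// decompressFileSketch is an illustrative example added by the editor (not
// part of the original package). It opens a hypothetical archive file, lets
// DecompressStream pick the right decompressor from the magic bytes, and
// iterates over the entries with a tar.Reader.
func decompressFileSketch(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	rc, err := DecompressStream(f)
	if err != nil {
		return err
	}
	defer rc.Close()

	tr := tar.NewReader(rc)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil // end of archive
		}
		if err != nil {
			return err
		}
		logrus.Debugf("entry: %s (%d bytes)", hdr.Name, hdr.Size)
	}
}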
// CompressStream returns a WriteCloser that compresses data written to it
// with the specified compression algorithm and writes the result to dest.
func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
	p := pools.BufioWriter32KPool
	buf := p.Get(dest)
	switch compression {
	case Uncompressed:
		writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
		return writeBufWrapper, nil
	case Gzip:
		gzWriter := gzip.NewWriter(dest)
		writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
		return writeBufWrapper, nil
	case Bzip2, Xz:
		// archive/bzip2 does not support writing, and there is no xz support at all.
		// However, this is not a problem as docker only currently generates gzipped tars.
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}
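
// compressToFileSketch is an illustrative example added by the editor (not
// part of the original package). It shows the intended pairing of
// CompressStream with a tar.Writer: the caller writes tar entries and the
// wrapper gzips them on the way to the destination file. The path and the
// entry contents are hypothetical.
func compressToFileSketch(path string) error {
	dest, err := os.Create(path)
	if err != nil {
		return err
	}
	defer dest.Close()

	cw, err := CompressStream(dest, Gzip)
	if err != nil {
		return err
	}
	defer cw.Close()

	tw := tar.NewWriter(cw)
	defer tw.Close()

	data := []byte("hello")
	hdr := &tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(data))}
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	_, err = tw.Write(data)
	return err
}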
// Extension returns the extension of a file that uses the specified compression algorithm.
func (compression *Compression) Extension() string {
	switch *compression {
	case Uncompressed:
		return "tar"
	case Bzip2:
		return "tar.bz2"
	case Gzip:
		return "tar.gz"
	case Xz:
		return "tar.xz"
	}
	return ""
}

type tarAppender struct {
	TarWriter *tar.Writer
	Buffer    *bufio.Writer

	// for hardlink mapping
	SeenFiles map[uint64]string
	UIDMaps   []idtools.IDMap
	GIDMaps   []idtools.IDMap
}

// canonicalTarName provides a platform-independent and consistent posix-style
// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) {
	name, err := CanonicalTarNameForPath(name)
	if err != nil {
		return "", err
	}

	// suffix with '/' for directories
	if isDir && !strings.HasSuffix(name, "/") {
		name += "/"
	}
	return name, nil
}

func (ta *tarAppender) addTarFile(path, name string) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	link := ""
	if fi.Mode()&os.ModeSymlink != 0 {
		if link, err = os.Readlink(path); err != nil {
			return err
		}
	}

	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return err
	}
	hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

	name, err = canonicalTarName(name, fi.IsDir())
	if err != nil {
		return fmt.Errorf("tar: cannot canonicalize path: %v", err)
	}
	hdr.Name = name

	nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
	if err != nil {
		return err
	}

	// if it's a regular file and has more than 1 link,
	// it's hardlinked, so set the type flag accordingly
	if fi.Mode().IsRegular() && nlink > 1 {
		// a link should have a name that it links to
		// and that linked name should be first in the tar archive
		if oldpath, ok := ta.SeenFiles[inode]; ok {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = oldpath
			hdr.Size = 0 // This Must be here for the writer math to add up!
		} else {
			ta.SeenFiles[inode] = name
		}
	}

	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability)
	}

	// handle re-mapping container ID mappings back to host ID mappings before
	// writing tar headers/files
	if ta.UIDMaps != nil || ta.GIDMaps != nil {
		uid, gid, err := getFileUIDGID(fi.Sys())
		if err != nil {
			return err
		}
		xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
		if err != nil {
			return err
		}
		xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
		if err != nil {
			return err
		}
		hdr.Uid = xUID
		hdr.Gid = xGID
	}

	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
		return err
	}

	if hdr.Typeflag == tar.TypeReg {
		file, err := os.Open(path)
		if err != nil {
			return err
		}

		ta.Buffer.Reset(ta.TarWriter)
		defer ta.Buffer.Reset(nil)
		_, err = io.Copy(ta.Buffer, file)
		file.Close()
		if err != nil {
			return err
		}
		err = ta.Buffer.Flush()
		if err != nil {
			return err
		}
	}

	return nil
}

func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error {
	// hdr.Mode is in linux format, which we can use for syscalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file
		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
		// Handle this in an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeLink:
		targetPath := filepath.Join(extractDir, hdr.Linkname)
		// check for hardlink breakout
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
		}
		if err := os.Link(targetPath, path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		// path -> hdr.Linkname = targetPath
		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)

		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
		// that symlink would first have to be created, which would be caught earlier, at this very check:
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
		}
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		logrus.Debugf("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
	}

	// Lchown is not supported on Windows.
	if Lchown && runtime.GOOS != "windows" {
		if chownOpts == nil {
			chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
		}
		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
			return err
		}
	}

	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
			return err
		}
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
		return err
	}

	// system.Chtimes doesn't support a NOFOLLOW flag atm
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return err
		}
	} else {
		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}

// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
func Tar(path string, compression Compression) (io.ReadCloser, error) {
	return TarWithOptions(path, &TarOptions{Compression: compression})
}
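
// tarToFileSketch is an illustrative example added by the editor (not part of
// the original package). It materializes the stream returned by Tar into a
// .tar.gz file on disk; both paths are hypothetical.
func tarToFileSketch(srcDir, outPath string) error {
	rc, err := Tar(srcDir, Gzip)
	if err != nil {
		return err
	}
	defer rc.Close()

	out, err := os.Create(outPath)
	if err != nil {
		return err
	}
	defer out.Close()

	// Stream the archive straight to the file without buffering it in memory.
	_, err = io.Copy(out, rc)
	return err
}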
// TarWithOptions creates an archive from the directory at `srcPath`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {

	// Fix the source path to work with long path names. This is a no-op
	// on platforms other than Windows.
	srcPath = fixVolumePathPrefix(srcPath)

	patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)
	if err != nil {
		return nil, err
	}

	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	go func() {
		ta := &tarAppender{
			TarWriter: tar.NewWriter(compressWriter),
			Buffer:    pools.BufioWriter32KPool.Get(nil),
			SeenFiles: make(map[uint64]string),
			UIDMaps:   options.UIDMaps,
			GIDMaps:   options.GIDMaps,
		}

		defer func() {
			// Make sure to check the error on Close.
			if err := ta.TarWriter.Close(); err != nil {
				logrus.Debugf("Can't close tar writer: %s", err)
			}
			if err := compressWriter.Close(); err != nil {
				logrus.Debugf("Can't close compress writer: %s", err)
			}
			if err := pipeWriter.Close(); err != nil {
				logrus.Debugf("Can't close pipe writer: %s", err)
			}
		}()

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		stat, err := os.Lstat(srcPath)
		if err != nil {
			return
		}

		if !stat.IsDir() {
			// We can't later join a non-dir with any includes because the
			// 'walk' will error if "file/." is stat-ed and "file" is not a
			// directory. So, we must split the source path and use the
			// basename as the include.
			if len(options.IncludeFiles) > 0 {
				logrus.Warn("Tar: Can't archive a file with includes")
			}

			dir, base := SplitPathDirEntry(srcPath)
			srcPath = dir
			options.IncludeFiles = []string{base}
		}

		if len(options.IncludeFiles) == 0 {
			options.IncludeFiles = []string{"."}
		}

		seen := make(map[string]bool)

		for _, include := range options.IncludeFiles {
			rebaseName := options.RebaseNames[include]

			walkRoot := getWalkRoot(srcPath, include)
			filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the source directory path. Skip in both situations.
					return nil
				}

				if options.IncludeSourceDir && include == "." && relFilePath != "." {
					relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
				}

				skip := false

				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
					if err != nil {
						logrus.Debugf("Error matching %s: %v", relFilePath, err)
						return err
					}
				}

				if skip {
					if !exceptions && f.IsDir() {
						return filepath.SkipDir
					}
					return nil
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource.
				if rebaseName != "" {
					var replacement string
					if rebaseName != string(filepath.Separator) {
						// Special case the root directory to replace with an
						// empty string instead so that we don't end up with
						// double slashes in the paths.
						replacement = rebaseName
					}

					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
				}
				return nil
			})
		}
	}()

	return pipeReader, nil
}
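
// tarWithOptionsSketch is an illustrative example added by the editor (not
// part of the original package). It demonstrates the main TarOptions knobs:
// an include list, exclude patterns, and a RebaseNames entry that renames the
// included directory inside the archive. The paths and names are hypothetical.
func tarWithOptionsSketch(srcPath string) (io.ReadCloser, error) {
	opts := &TarOptions{
		Compression:     Uncompressed,
		IncludeFiles:    []string{"config"},
		ExcludePatterns: []string{"config/*.tmp"},
		// Entries under "config" will appear under "etc" in the archive.
		RebaseNames: map[string]string{"config": "etc"},
	}
	return TarWithOptions(srcPath, opts)
}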
// Unpack unpacks the decompressedArchive to dest with options.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
	tr := tar.NewReader(decompressedArchive)
	trBuf := pools.BufioReader32KPool.Get(nil)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
	if err != nil {
		return err
	}

	// Iterate through the files in the archive.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
		// This keeps "..\" as-is, but normalizes "\..\" to "\".
		hdr.Name = filepath.Clean(hdr.Name)

		for _, exclude := range options.ExcludePatterns {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
		// the filepath format for the OS on which the daemon is running. Hence
		// the check for a slash-suffix MUST be done in an OS-agnostic way.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = system.MkdirAll(parentPath, 0777)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return err
		}
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}

		// If path exists we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing directory with a non-directory from the archive.
				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
			}

			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing non-directory with a directory from the archive.
				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
			}

			if fi.IsDir() && hdr.Name == "." {
				continue
			}

			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		trBuf.Reset(tr)

		// if the options contain a uid & gid maps, convert header uid/gid
		// entries using the maps such that lchown sets the proper mapped
		// uid/gid after writing the file. We only perform this mapping if
		// the file isn't already owned by the remapped root UID or GID, as
		// that specific uid/gid has no mapping from container -> host, and
		// those files already have the proper ownership for inside the
		// container.
		if hdr.Uid != remappedRootUID {
			xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
			if err != nil {
				return err
			}
			hdr.Uid = xUID
		}
		if hdr.Gid != remappedRootGID {
			xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
			if err != nil {
				return err
			}
			hdr.Gid = xGID
		}

		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return err
		}
	}
	return nil
}

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
	return untarHandler(tarArchive, dest, options, true)
}
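
// untarFileSketch is an illustrative example added by the editor (not part of
// the original package). It feeds a hypothetical compressed tarball from disk
// to Untar, relying on the automatic decompression, skips chown so it can run
// as an unprivileged user, and excludes a subtree by name prefix.
func untarFileSketch(tarPath, dest string) error {
	f, err := os.Open(tarPath)
	if err != nil {
		return err
	}
	defer f.Close()

	return Untar(f, dest, &TarOptions{
		NoLchown:        true,
		ExcludePatterns: []string{"tmp/"},
	})
}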
// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
	return untarHandler(tarArchive, dest, options, false)
}

// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
	if tarArchive == nil {
		return fmt.Errorf("Empty archive")
	}
	dest = filepath.Clean(dest)
	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}

	r := tarArchive
	if decompress {
		decompressedArchive, err := DecompressStream(tarArchive)
		if err != nil {
			return err
		}
		defer decompressedArchive.Close()
		r = decompressedArchive
	}

	return Unpack(r, dest, options)
}

// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func (archiver *Archiver) TarUntar(src, dst string) error {
	logrus.Debugf("TarUntar(%s %s)", src, dst)
	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
	if err != nil {
		return err
	}
	defer archive.Close()

	var options *TarOptions
	if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
		options = &TarOptions{
			UIDMaps: archiver.UIDMaps,
			GIDMaps: archiver.GIDMaps,
		}
	}
	return archiver.Untar(archive, dst, options)
}

// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func TarUntar(src, dst string) error {
	return defaultArchiver.TarUntar(src, dst)
}

// UntarPath untars a file from path to a destination, src is the source tar file path.
func (archiver *Archiver) UntarPath(src, dst string) error {
	archive, err := os.Open(src)
	if err != nil {
		return err
	}
	defer archive.Close()
	var options *TarOptions
	if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
		options = &TarOptions{
			UIDMaps: archiver.UIDMaps,
			GIDMaps: archiver.GIDMaps,
		}
	}
	if err := archiver.Untar(archive, dst, options); err != nil {
		return err
	}
	return nil
}

// UntarPath is a convenience function which looks for an archive
// at filesystem path `src`, and unpacks it at `dst`.
func UntarPath(src, dst string) error {
	return defaultArchiver.UntarPath(src, dst)
}

// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func (archiver *Archiver) CopyWithTar(src, dst string) error {
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}
	if !srcSt.IsDir() {
		return archiver.CopyFileWithTar(src, dst)
	}
	// Create dst, copy src's content into it
	logrus.Debugf("Creating dest directory: %s", dst)
	if err := system.MkdirAll(dst, 0755); err != nil {
		return err
	}
	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
	return archiver.TarUntar(src, dst)
}

// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func CopyWithTar(src, dst string) error {
	return defaultArchiver.CopyWithTar(src, dst)
}
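
// loggingArchiverSketch is an illustrative example added by the editor (not
// part of the original package). It shows why Archiver takes a pluggable
// Untar: callers can wrap the stock Untar (here with extra logging) and still
// reuse CopyWithTar, TarUntar, etc. unchanged. The src and dst paths are
// hypothetical.
func loggingArchiverSketch(src, dst string) error {
	archiver := &Archiver{
		Untar: func(archive io.Reader, dest string, options *TarOptions) error {
			logrus.Debugf("unpacking into %s", dest)
			return Untar(archive, dest, options)
		},
	}
	// CopyWithTar streams src into dst through the wrapped Untar above.
	return archiver.CopyWithTar(src, dst)
}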
// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}

	if srcSt.IsDir() {
		return fmt.Errorf("Can't copy a directory")
	}

	// Clean up the trailing slash. This must be done in an operating
	// system specific manner.
	if dst[len(dst)-1] == os.PathSeparator {
		dst = filepath.Join(dst, filepath.Base(src))
	}
	// Create the holding directory if necessary
	if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
		return err
	}

	r, w := io.Pipe()
	errC := promise.Go(func() error {
		defer w.Close()

		srcF, err := os.Open(src)
		if err != nil {
			return err
		}
		defer srcF.Close()

		hdr, err := tar.FileInfoHeader(srcSt, "")
		if err != nil {
			return err
		}
		hdr.Name = filepath.Base(dst)
		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

		remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
		if err != nil {
			return err
		}

		// only perform mapping if the file being copied isn't already owned by the
		// uid or gid of the remapped root in the container
		if remappedRootUID != hdr.Uid {
			xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
			if err != nil {
				return err
			}
			hdr.Uid = xUID
		}

		if remappedRootGID != hdr.Gid {
			xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
			if err != nil {
				return err
			}
			hdr.Gid = xGID
		}

		tw := tar.NewWriter(w)
		defer tw.Close()
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(tw, srcF); err != nil {
			return err
		}
		return nil
	})
	defer func() {
		if er := <-errC; er != nil {
			err = er
		}
	}()

	return archiver.Untar(r, filepath.Dir(dst), nil)
}

// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// Destination handling is in an operating-system-specific manner depending
// on where the daemon is running. If `dst` ends with a trailing slash
// the final destination path will be `dst/base(src)` (Linux) or
// `dst\base(src)` (Windows).
func CopyFileWithTar(src, dst string) (err error) {
	return defaultArchiver.CopyFileWithTar(src, dst)
}
// cmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
	chdone := make(chan struct{})
	cmd.Stdin = input
	pipeR, pipeW := io.Pipe()
	cmd.Stdout = pipeW
	var errBuf bytes.Buffer
	cmd.Stderr = &errBuf

	// Run the command and return the pipe
	if err := cmd.Start(); err != nil {
		return nil, nil, err
	}

	// Copy stdout to the returned pipe
	go func() {
		if err := cmd.Wait(); err != nil {
			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
		} else {
			pipeW.Close()
		}
		close(chdone)
	}()

	return pipeR, chdone, nil
}

// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
	f, err := ioutil.TempFile(dir, "")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(f, src); err != nil {
		return nil, err
	}
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	st, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := st.Size()
	return &TempArchive{File: f, Size: size}, nil
}
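
// tempArchiveSketch is an illustrative example added by the editor (not part
// of the original package). It buffers the output of Tar into a TempArchive,
// which lets the caller learn the archive's size up front and then consume it
// exactly once; the backing temporary file removes itself when fully read.
// The srcDir and tmpDir paths are hypothetical.
func tempArchiveSketch(srcDir, tmpDir string) (int64, error) {
	rc, err := Tar(srcDir, Uncompressed)
	if err != nil {
		return 0, err
	}
	defer rc.Close()

	tmp, err := NewTempArchive(rc, tmpDir)
	if err != nil {
		return 0, err
	}

	// Drain the archive; after the last byte the backing file is deleted.
	if _, err := io.Copy(ioutil.Discard, tmp); err != nil {
		return 0, err
	}
	return tmp.Size, nil
}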
// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
type TempArchive struct {
	*os.File
	Size   int64 // Pre-computed from Stat().Size() as a convenience
	read   int64
	closed bool
}

// Close closes the underlying file if it's still open, or does a no-op
// to allow callers to try to close the TempArchive multiple times safely.
func (archive *TempArchive) Close() error {
	if archive.closed {
		return nil
	}

	archive.closed = true

	return archive.File.Close()
}

func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	archive.read += int64(n)
	if err != nil || archive.read == archive.Size {
		archive.Close()
		os.Remove(archive.File.Name())
	}
	return n, err
}