archive.go 29 KB

package archive

import (
	"archive/tar"
	"bufio"
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"syscall"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/promise"
	"github.com/docker/docker/pkg/system"
)

type (
	// Archive is an io.ReadCloser, combining the Read and Close interfaces.
	Archive io.ReadCloser
	// Reader is a type of io.Reader.
	Reader io.Reader
	// Compression is the state that represents whether a file is compressed or not.
	Compression int
	// TarChownOptions wraps the chown options UID and GID.
	TarChownOptions struct {
		UID, GID int
	}
	// TarOptions wraps the tar options.
	TarOptions struct {
		IncludeFiles     []string
		ExcludePatterns  []string
		Compression      Compression
		NoLchown         bool
		UIDMaps          []idtools.IDMap
		GIDMaps          []idtools.IDMap
		ChownOpts        *TarChownOptions
		IncludeSourceDir bool
		// When unpacking, specifies whether overwriting a directory with a
		// non-directory is allowed and vice versa.
		NoOverwriteDirNonDir bool
		// For each include when creating an archive, the included name will be
		// replaced with the matching name from this map.
		RebaseNames map[string]string
	}

	// Archiver allows the reuse of most utility functions of this package
	// with a pluggable Untar function. Also, to facilitate the passing of
	// specific id mappings for untar, an archiver can be created with maps
	// which will then be passed to Untar operations.
	Archiver struct {
		Untar   func(io.Reader, string, *TarOptions) error
		UIDMaps []idtools.IDMap
		GIDMaps []idtools.IDMap
	}

	// breakoutError is used to differentiate errors related to breaking out
	// of the archive's root directory. When testing archive breakout in the
	// unit tests, this error is expected in order for the test to pass.
	breakoutError error
)

var (
	// ErrNotImplemented is the error returned when a function is not implemented.
	ErrNotImplemented = errors.New("Function not implemented")

	defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
)

const (
	// Uncompressed represents the uncompressed state.
	Uncompressed Compression = iota
	// Bzip2 is the bzip2 compression algorithm.
	Bzip2
	// Gzip is the gzip compression algorithm.
	Gzip
	// Xz is the xz compression algorithm.
	Xz
)

// IsArchive checks whether the given header bytes describe an archive,
// either by a known compression magic number or by a readable tar header.
func IsArchive(header []byte) bool {
	compression := DetectCompression(header)
	if compression != Uncompressed {
		return true
	}
	r := tar.NewReader(bytes.NewBuffer(header))
	_, err := r.Next()
	return err == nil
}

// DetectCompression detects the compression algorithm of the source.
func DetectCompression(source []byte) Compression {
	for compression, m := range map[Compression][]byte{
		Bzip2: {0x42, 0x5A, 0x68},
		Gzip:  {0x1F, 0x8B, 0x08},
		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
	} {
		if len(source) < len(m) {
			logrus.Debugf("Len too short")
			continue
		}
		if bytes.Compare(m, source[:len(m)]) == 0 {
			return compression
		}
	}
	return Uncompressed
}

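// Illustrative sketch (not part of the original file): DetectCompression only
// needs the first few bytes of a stream, so a caller can peek at the header
// before deciding how to handle it. The path below is hypothetical and error
// handling is abbreviated.
//
//	f, err := os.Open("/tmp/layer.tar.gz") // hypothetical path
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	header := make([]byte, 10)
//	n, _ := f.Read(header)
//	switch DetectCompression(header[:n]) {
//	case Gzip:
//		// hand the stream to DecompressStream (or gzip.NewReader directly)
//	case Uncompressed:
//		// read as a plain tar
//	}
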
func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
	args := []string{"xz", "-d", "-c", "-q"}
	return CmdStream(exec.Command(args[0], args[1:]...), archive)
}

// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	p := pools.BufioReader32KPool
	buf := p.Get(archive)
	bs, err := buf.Peek(10)
	if err != nil {
		return nil, err
	}

	compression := DetectCompression(bs)
	switch compression {
	case Uncompressed:
		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
		return readBufWrapper, nil
	case Gzip:
		gzReader, err := gzip.NewReader(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
		return readBufWrapper, nil
	case Bzip2:
		bz2Reader := bzip2.NewReader(buf)
		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
		return readBufWrapper, nil
	case Xz:
		xzReader, err := xzDecompress(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
		return readBufWrapper, nil
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}

// CompressStream returns a WriteCloser that compresses anything written to it
// with the specified compression algorithm and writes the result to dest.
func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
	p := pools.BufioWriter32KPool
	buf := p.Get(dest)
	switch compression {
	case Uncompressed:
		writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
		return writeBufWrapper, nil
	case Gzip:
		gzWriter := gzip.NewWriter(dest)
		writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
		return writeBufWrapper, nil
	case Bzip2, Xz:
		// archive/bzip2 does not support writing, and there is no xz support at all.
		// However, this is not a problem as docker only currently generates gzipped tars.
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}

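// Illustrative sketch (not part of the original file): CompressStream is the
// write-side counterpart of DecompressStream. Only gzip (and identity) are
// supported for writing, as noted above. The path is hypothetical and error
// handling is abbreviated.
//
//	out, _ := os.Create("/tmp/out.tar.gz") // hypothetical path
//	wc, err := CompressStream(out, Gzip)
//	if err != nil {
//		return err
//	}
//	defer wc.Close()
//	tw := tar.NewWriter(wc) // entries written to tw end up gzip-compressed in out
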
// Extension returns the extension of a file that uses the specified compression algorithm.
func (compression *Compression) Extension() string {
	switch *compression {
	case Uncompressed:
		return "tar"
	case Bzip2:
		return "tar.bz2"
	case Gzip:
		return "tar.gz"
	case Xz:
		return "tar.xz"
	}
	return ""
}

type tarAppender struct {
	TarWriter *tar.Writer
	Buffer    *bufio.Writer

	// for hardlink mapping
	SeenFiles map[uint64]string
	UIDMaps   []idtools.IDMap
	GIDMaps   []idtools.IDMap
}

// canonicalTarName provides a platform-independent and consistent posix-style
// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) {
	name, err := CanonicalTarNameForPath(name)
	if err != nil {
		return "", err
	}

	// suffix with '/' for directories
	if isDir && !strings.HasSuffix(name, "/") {
		name += "/"
	}
	return name, nil
}

func (ta *tarAppender) addTarFile(path, name string) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	link := ""
	if fi.Mode()&os.ModeSymlink != 0 {
		if link, err = os.Readlink(path); err != nil {
			return err
		}
	}

	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return err
	}
	hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

	name, err = canonicalTarName(name, fi.IsDir())
	if err != nil {
		return fmt.Errorf("tar: cannot canonicalize path: %v", err)
	}
	hdr.Name = name

	nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
	if err != nil {
		return err
	}

	// if it's a regular file and has more than 1 link,
	// it's hardlinked, so set the type flag accordingly
	if fi.Mode().IsRegular() && nlink > 1 {
		// a link should have a name that it links to
		// and that linked name should be first in the tar archive
		if oldpath, ok := ta.SeenFiles[inode]; ok {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = oldpath
			hdr.Size = 0 // This must be here for the writer math to add up!
		} else {
			ta.SeenFiles[inode] = name
		}
	}

	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability)
	}

	// handle re-mapping container ID mappings back to host ID mappings before
	// writing tar headers/files
	if ta.UIDMaps != nil || ta.GIDMaps != nil {
		uid, gid, err := getFileUIDGID(fi.Sys())
		if err != nil {
			return err
		}
		xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
		if err != nil {
			return err
		}
		xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
		if err != nil {
			return err
		}
		hdr.Uid = xUID
		hdr.Gid = xGID
	}

	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
		return err
	}

	if hdr.Typeflag == tar.TypeReg {
		file, err := os.Open(path)
		if err != nil {
			return err
		}

		ta.Buffer.Reset(ta.TarWriter)
		defer ta.Buffer.Reset(nil)
		_, err = io.Copy(ta.Buffer, file)
		file.Close()
		if err != nil {
			return err
		}
		err = ta.Buffer.Flush()
		if err != nil {
			return err
		}
	}

	return nil
}

func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error {
	// hdr.Mode is in linux format, which we can use for syscalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file
		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
		// Handle this in an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeLink:
		targetPath := filepath.Join(extractDir, hdr.Linkname)
		// check for hardlink breakout
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
		}
		if err := os.Link(targetPath, path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		// path -> hdr.Linkname = targetPath
		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)

		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
		// that symlink would first have to be created, which would be caught earlier, at this very check:
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
		}
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		logrus.Debugf("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
	}

	// Lchown is not supported on Windows.
	if Lchown && runtime.GOOS != "windows" {
		if chownOpts == nil {
			chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
		}
		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
			return err
		}
	}

	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
			return err
		}
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
		return err
	}

	// system.Chtimes doesn't support a NOFOLLOW flag atm
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return err
		}
	} else {
		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}

// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
func Tar(path string, compression Compression) (io.ReadCloser, error) {
	return TarWithOptions(path, &TarOptions{Compression: compression})
}

// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {

	// Fix the source path to work with long path names. This is a no-op
	// on platforms other than Windows.
	srcPath = fixVolumePathPrefix(srcPath)

	patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)
	if err != nil {
		return nil, err
	}

	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	go func() {
		ta := &tarAppender{
			TarWriter: tar.NewWriter(compressWriter),
			Buffer:    pools.BufioWriter32KPool.Get(nil),
			SeenFiles: make(map[uint64]string),
			UIDMaps:   options.UIDMaps,
			GIDMaps:   options.GIDMaps,
		}

		defer func() {
			// Make sure to check the error on Close.
			if err := ta.TarWriter.Close(); err != nil {
				logrus.Debugf("Can't close tar writer: %s", err)
			}
			if err := compressWriter.Close(); err != nil {
				logrus.Debugf("Can't close compress writer: %s", err)
			}
			if err := pipeWriter.Close(); err != nil {
				logrus.Debugf("Can't close pipe writer: %s", err)
			}
		}()

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		stat, err := os.Lstat(srcPath)
		if err != nil {
			return
		}

		if !stat.IsDir() {
			// We can't later join a non-dir with any includes because the
			// 'walk' will error if "file/." is stat-ed and "file" is not a
			// directory. So, we must split the source path and use the
			// basename as the include.
			if len(options.IncludeFiles) > 0 {
				logrus.Warn("Tar: Can't archive a file with includes")
			}

			dir, base := SplitPathDirEntry(srcPath)
			srcPath = dir
			options.IncludeFiles = []string{base}
		}

		if len(options.IncludeFiles) == 0 {
			options.IncludeFiles = []string{"."}
		}

		seen := make(map[string]bool)

		for _, include := range options.IncludeFiles {
			rebaseName := options.RebaseNames[include]

			walkRoot := getWalkRoot(srcPath, include)
			filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the source directory path. Skip in both situations.
					return nil
				}

				if options.IncludeSourceDir && include == "." && relFilePath != "." {
					relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
				}

				skip := false

				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
					if err != nil {
						logrus.Debugf("Error matching %s: %v", relFilePath, err)
						return err
					}
				}

				if skip {
					if !exceptions && f.IsDir() {
						return filepath.SkipDir
					}
					return nil
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource.
				if rebaseName != "" {
					var replacement string
					if rebaseName != string(filepath.Separator) {
						// Special case the root directory to replace with an
						// empty string instead so that we don't end up with
						// double slashes in the paths.
						replacement = rebaseName
					}

					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
				}
				return nil
			})
		}
	}()

	return pipeReader, nil
}

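// Illustrative sketch (not part of the original file): a typical
// TarWithOptions call that archives a directory while honouring
// .dockerignore-style exclude patterns. Paths and patterns are hypothetical.
//
//	rc, err := TarWithOptions("/path/to/context", &TarOptions{
//		Compression:     Uncompressed,
//		ExcludePatterns: []string{"*.tmp", "node_modules"},
//	})
//	if err != nil {
//		return err
//	}
//	defer rc.Close()
//	// rc streams the archive as it is produced, e.g. io.Copy(conn, rc)
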
// Unpack unpacks the decompressedArchive to dest with options.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
	tr := tar.NewReader(decompressedArchive)
	trBuf := pools.BufioReader32KPool.Get(nil)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
	if err != nil {
		return err
	}

	// Iterate through the files in the archive.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
		// This keeps "..\" as-is, but normalizes "\..\" to "\".
		hdr.Name = filepath.Clean(hdr.Name)

		for _, exclude := range options.ExcludePatterns {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
		// the filepath format for the OS on which the daemon is running. Hence
		// the check for a slash-suffix MUST be done in an OS-agnostic way.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = system.MkdirAll(parentPath, 0777)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return err
		}
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}

		// If path exists we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing directory with a non-directory from the archive.
				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
			}

			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing non-directory with a directory from the archive.
				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
			}

			if fi.IsDir() && hdr.Name == "." {
				continue
			}

			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		trBuf.Reset(tr)

		// if the options contain a uid & gid maps, convert header uid/gid
		// entries using the maps such that lchown sets the proper mapped
		// uid/gid after writing the file. We only perform this mapping if
		// the file isn't already owned by the remapped root UID or GID, as
		// that specific uid/gid has no mapping from container -> host, and
		// those files already have the proper ownership for inside the
		// container.
		if hdr.Uid != remappedRootUID {
			xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
			if err != nil {
				return err
			}
			hdr.Uid = xUID
		}
		if hdr.Gid != remappedRootGID {
			xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
			if err != nil {
				return err
			}
			hdr.Gid = xGID
		}

		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return err
		}
	}
	return nil
}

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
	return untarHandler(tarArchive, dest, options, true)
}

// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
	return untarHandler(tarArchive, dest, options, false)
}

// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
	if tarArchive == nil {
		return fmt.Errorf("Empty archive")
	}
	dest = filepath.Clean(dest)
	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}

	r := tarArchive
	if decompress {
		decompressedArchive, err := DecompressStream(tarArchive)
		if err != nil {
			return err
		}
		defer decompressedArchive.Close()
		r = decompressedArchive
	}

	return Unpack(r, dest, options)
}

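// Illustrative sketch (not part of the original file): Untar auto-detects and
// decompresses the stream before unpacking, so the same call works for plain,
// gzip, bzip2 or xz tars. Paths are hypothetical and error handling is
// abbreviated.
//
//	f, err := os.Open("/tmp/rootfs.tar.xz") // hypothetical path
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := Untar(f, "/var/lib/extracted", &TarOptions{NoLchown: true}); err != nil {
//		return err
//	}
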
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func (archiver *Archiver) TarUntar(src, dst string) error {
	logrus.Debugf("TarUntar(%s %s)", src, dst)
	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
	if err != nil {
		return err
	}
	defer archive.Close()

	var options *TarOptions
	if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
		options = &TarOptions{
			UIDMaps: archiver.UIDMaps,
			GIDMaps: archiver.GIDMaps,
		}
	}
	return archiver.Untar(archive, dst, options)
}

// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func TarUntar(src, dst string) error {
	return defaultArchiver.TarUntar(src, dst)
}

// UntarPath untars the archive located at the filesystem path `src` into the
// directory at `dst`.
func (archiver *Archiver) UntarPath(src, dst string) error {
	archive, err := os.Open(src)
	if err != nil {
		return err
	}
	defer archive.Close()

	var options *TarOptions
	if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
		options = &TarOptions{
			UIDMaps: archiver.UIDMaps,
			GIDMaps: archiver.GIDMaps,
		}
	}
	if err := archiver.Untar(archive, dst, options); err != nil {
		return err
	}
	return nil
}

// UntarPath is a convenience function which looks for an archive
// at filesystem path `src`, and unpacks it at `dst`.
func UntarPath(src, dst string) error {
	return defaultArchiver.UntarPath(src, dst)
}

// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func (archiver *Archiver) CopyWithTar(src, dst string) error {
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}
	if !srcSt.IsDir() {
		return archiver.CopyFileWithTar(src, dst)
	}
	// Create dst, copy src's content into it
	logrus.Debugf("Creating dest directory: %s", dst)
	if err := system.MkdirAll(dst, 0755); err != nil {
		return err
	}
	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
	return archiver.TarUntar(src, dst)
}

// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func CopyWithTar(src, dst string) error {
	return defaultArchiver.CopyWithTar(src, dst)
}

// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}

	if srcSt.IsDir() {
		return fmt.Errorf("Can't copy a directory")
	}

	// Clean up the trailing slash. This must be done in an operating
	// system specific manner.
	if dst[len(dst)-1] == os.PathSeparator {
		dst = filepath.Join(dst, filepath.Base(src))
	}
	// Create the holding directory if necessary
	if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
		return err
	}

	r, w := io.Pipe()
	errC := promise.Go(func() error {
		defer w.Close()

		srcF, err := os.Open(src)
		if err != nil {
			return err
		}
		defer srcF.Close()

		hdr, err := tar.FileInfoHeader(srcSt, "")
		if err != nil {
			return err
		}
		hdr.Name = filepath.Base(dst)
		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

		remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
		if err != nil {
			return err
		}

		// only perform mapping if the file being copied isn't already owned by the
		// uid or gid of the remapped root in the container
		if remappedRootUID != hdr.Uid {
			xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
			if err != nil {
				return err
			}
			hdr.Uid = xUID
		}

		if remappedRootGID != hdr.Gid {
			xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
			if err != nil {
				return err
			}
			hdr.Gid = xGID
		}

		tw := tar.NewWriter(w)
		defer tw.Close()
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(tw, srcF); err != nil {
			return err
		}
		return nil
	})
	defer func() {
		// propagate an error from the tar-writing goroutine, unless the
		// Untar below already returned one
		if er := <-errC; err == nil && er != nil {
			err = er
		}
	}()

	err = archiver.Untar(r, filepath.Dir(dst), nil)
	if err != nil {
		r.CloseWithError(err)
	}
	return err
}

// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// Destination handling is in an operating system specific manner depending
// on where the daemon is running. If `dst` ends with a trailing slash
// the final destination path will be `dst/base(src)` (Linux) or
// `dst\base(src)` (Windows).
func CopyFileWithTar(src, dst string) (err error) {
	return defaultArchiver.CopyFileWithTar(src, dst)
}

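// Illustrative sketch (not part of the original file): copying a single file
// while preserving its metadata. A trailing separator on dst keeps the source
// base name, as described above. Paths are hypothetical.
//
//	// copies /etc/hosts to /tmp/staging/hosts
//	if err := CopyFileWithTar("/etc/hosts", "/tmp/staging/"); err != nil {
//		return err
//	}
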
// CmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
	if input != nil {
		stdin, err := cmd.StdinPipe()
		if err != nil {
			return nil, err
		}
		// Write stdin if any
		go func() {
			io.Copy(stdin, input)
			stdin.Close()
		}()
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	pipeR, pipeW := io.Pipe()
	errChan := make(chan []byte)
	// Collect stderr, we will use it in case of an error
	go func() {
		errText, e := ioutil.ReadAll(stderr)
		if e != nil {
			errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
		}
		errChan <- errText
	}()
	// Copy stdout to the returned pipe
	go func() {
		_, err := io.Copy(pipeW, stdout)
		if err != nil {
			pipeW.CloseWithError(err)
		}
		errText := <-errChan
		if err := cmd.Wait(); err != nil {
			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
		} else {
			pipeW.Close()
		}
	}()
	// Run the command and return the pipe
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	return pipeR, nil
}

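// Illustrative sketch (not part of the original file): CmdStream is how
// xzDecompress above shells out to the xz binary; any command whose stdout
// should be consumed as a stream can be wrapped the same way.
// `compressedReader` is an assumed io.Reader supplied by the caller.
//
//	rc, err := CmdStream(exec.Command("xz", "-d", "-c", "-q"), compressedReader)
//	if err != nil {
//		return err
//	}
//	defer rc.Close()
//	// reading rc yields the command's stdout; a non-zero exit surfaces as a
//	// read error that includes anything the command wrote to stderr
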
// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
	f, err := ioutil.TempFile(dir, "")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(f, src); err != nil {
		return nil, err
	}
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	st, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := st.Size()
	return &TempArchive{File: f, Size: size}, nil
}

// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
type TempArchive struct {
	*os.File
	Size   int64 // Pre-computed from Stat().Size() as a convenience
	read   int64
	closed bool
}

// Close closes the underlying file if it's still open, or does a no-op
// to allow callers to try to close the TempArchive multiple times safely.
func (archive *TempArchive) Close() error {
	if archive.closed {
		return nil
	}

	archive.closed = true

	return archive.File.Close()
}

// Read reads from the underlying file and removes it once the whole
// archive has been consumed (or a read error occurs).
func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	archive.read += int64(n)
	if err != nil || archive.read == archive.Size {
		archive.Close()
		os.Remove(archive.File.Name())
	}
	return n, err
}
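
// Illustrative sketch (not part of the original file): NewTempArchive spools a
// stream to disk so it can be sized and re-read once; the backing file is
// removed automatically after it has been read to completion. `rc` and `dst`
// are assumed to be supplied by the caller.
//
//	tmp, err := NewTempArchive(rc, "") // "" uses the default temp directory
//	if err != nil {
//		return err
//	}
//	logrus.Debugf("archive size: %d bytes", tmp.Size)
//	if _, err := io.Copy(dst, tmp); err != nil { // file is deleted once fully read
//		return err
//	}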