package archive

import (
	"archive/tar"
	"bufio"
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/promise"
	"github.com/docker/docker/pkg/system"
)

type (
	Archive       io.ReadCloser
	ArchiveReader io.Reader
	Compression   int

	TarOptions struct {
		IncludeFiles    []string
		ExcludePatterns []string
		Compression     Compression
		NoLchown        bool
		Name            string
	}

	// Archiver allows the reuse of most utility functions of this package
	// with a pluggable Untar function.
	Archiver struct {
		Untar func(io.Reader, string, *TarOptions) error
	}

	// breakoutError is used to differentiate errors related to breaking out.
	// When testing archive breakout in the unit tests, this error is expected
	// in order for the test to pass.
	breakoutError error
)

var (
	ErrNotImplemented = errors.New("Function not implemented")

	defaultArchiver = &Archiver{Untar}
)

const (
	Uncompressed Compression = iota
	Bzip2
	Gzip
	Xz
)

func IsArchive(header []byte) bool {
	compression := DetectCompression(header)
	if compression != Uncompressed {
		return true
	}
	r := tar.NewReader(bytes.NewBuffer(header))
	_, err := r.Next()
	return err == nil
}

func DetectCompression(source []byte) Compression {
	for compression, m := range map[Compression][]byte{
		Bzip2: {0x42, 0x5A, 0x68},
		Gzip:  {0x1F, 0x8B, 0x08},
		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
	} {
		if len(source) < len(m) {
			logrus.Debugf("Len too short")
			continue
		}
		if bytes.Compare(m, source[:len(m)]) == 0 {
			return compression
		}
	}
	return Uncompressed
}
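
// Usage sketch (hypothetical file path): DetectCompression only needs the
// first few bytes of a stream, so peeking at a file header is enough to
// classify it before deciding how to read the rest.
//
//	f, err := os.Open("/tmp/layer.tar.gz") // hypothetical path
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	header := make([]byte, 10)
//	n, _ := io.ReadFull(f, header)
//	switch DetectCompression(header[:n]) {
//	case Gzip:
//		// wrap in a gzip reader, or just hand the stream to DecompressStream
//	case Uncompressed:
//		// plain tar, or not an archive at all (see IsArchive)
//	}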

func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
	args := []string{"xz", "-d", "-c", "-q"}
	return CmdStream(exec.Command(args[0], args[1:]...), archive)
}

func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	p := pools.BufioReader32KPool
	buf := p.Get(archive)
	bs, err := buf.Peek(10)
	if err != nil {
		return nil, err
	}

	compression := DetectCompression(bs)
	switch compression {
	case Uncompressed:
		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
		return readBufWrapper, nil
	case Gzip:
		gzReader, err := gzip.NewReader(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
		return readBufWrapper, nil
	case Bzip2:
		bz2Reader := bzip2.NewReader(buf)
		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
		return readBufWrapper, nil
	case Xz:
		xzReader, err := xzDecompress(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
		return readBufWrapper, nil
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}

func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
	p := pools.BufioWriter32KPool
	buf := p.Get(dest)
	switch compression {
	case Uncompressed:
		writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
		return writeBufWrapper, nil
	case Gzip:
		gzWriter := gzip.NewWriter(dest)
		writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
		return writeBufWrapper, nil
	case Bzip2, Xz:
		// archive/bzip2 does not support writing, and there is no xz support at all
		// However, this is not a problem as docker only currently generates gzipped tars
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}

func (compression *Compression) Extension() string {
	switch *compression {
	case Uncompressed:
		return "tar"
	case Bzip2:
		return "tar.bz2"
	case Gzip:
		return "tar.gz"
	case Xz:
		return "tar.xz"
	}
	return ""
}
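
// Usage sketch (hypothetical payload): CompressStream wraps a destination
// writer, and DecompressStream auto-detects the format on the way back in,
// so the two can be paired around an io.Pipe or a file.
//
//	pr, pw := io.Pipe()
//	go func() {
//		cw, err := CompressStream(pw, Gzip)
//		if err != nil {
//			pw.CloseWithError(err)
//			return
//		}
//		cw.Write([]byte("hello world")) // hypothetical payload
//		cw.Close()
//		pw.Close()
//	}()
//	rc, err := DecompressStream(pr)
//	if err != nil {
//		return err
//	}
//	defer rc.Close()
//	io.Copy(ioutil.Discard, rc)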

type tarAppender struct {
	TarWriter *tar.Writer
	Buffer    *bufio.Writer

	// for hardlink mapping
	SeenFiles map[uint64]string
}

// canonicalTarName provides a platform-independent and consistent posix-style
// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) {
	name, err := CanonicalTarNameForPath(name)
	if err != nil {
		return "", err
	}

	// suffix with '/' for directories
	if isDir && !strings.HasSuffix(name, "/") {
		name += "/"
	}
	return name, nil
}

func (ta *tarAppender) addTarFile(path, name string) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	link := ""
	if fi.Mode()&os.ModeSymlink != 0 {
		if link, err = os.Readlink(path); err != nil {
			return err
		}
	}

	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return err
	}
	hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

	name, err = canonicalTarName(name, fi.IsDir())
	if err != nil {
		return fmt.Errorf("tar: cannot canonicalize path: %v", err)
	}
	hdr.Name = name

	nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
	if err != nil {
		return err
	}

	// if it's a regular file and has more than 1 link,
	// it's hardlinked, so set the type flag accordingly
	if fi.Mode().IsRegular() && nlink > 1 {
		// a link should have a name that it links to
		// and that linked name should be first in the tar archive
		if oldpath, ok := ta.SeenFiles[inode]; ok {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = oldpath
			hdr.Size = 0 // This Must be here for the writer math to add up!
		} else {
			ta.SeenFiles[inode] = name
		}
	}

	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability)
	}

	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
		return err
	}

	if hdr.Typeflag == tar.TypeReg {
		file, err := os.Open(path)
		if err != nil {
			return err
		}

		ta.Buffer.Reset(ta.TarWriter)
		defer ta.Buffer.Reset(nil)
		_, err = io.Copy(ta.Buffer, file)
		file.Close()
		if err != nil {
			return err
		}
		err = ta.Buffer.Flush()
		if err != nil {
			return err
		}
	}

	return nil
}

func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error {
	// hdr.Mode is in linux format, which we can use for syscalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file
		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
		mode := uint32(hdr.Mode & 07777)
		switch hdr.Typeflag {
		case tar.TypeBlock:
			mode |= syscall.S_IFBLK
		case tar.TypeChar:
			mode |= syscall.S_IFCHR
		case tar.TypeFifo:
			mode |= syscall.S_IFIFO
		}

		if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
			return err
		}

	case tar.TypeLink:
		targetPath := filepath.Join(extractDir, hdr.Linkname)
		// check for hardlink breakout
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
		}
		if err := os.Link(targetPath, path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		// path -> hdr.Linkname = targetPath
		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)

		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
		// that symlink would first have to be created, which would be caught earlier, at this very check:
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
		}
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		logrus.Debugf("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("Unhandled tar header type %d", hdr.Typeflag)
	}

	if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown {
		return err
	}

	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
			return err
		}
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
			return err
		}
	}

	ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
	// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, so symlinks
	// are handled separately below with system.LUtimesNano
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	} else {
		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}

// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
func Tar(path string, compression Compression) (io.ReadCloser, error) {
	return TarWithOptions(path, &TarOptions{Compression: compression})
}

// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
	patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)
	if err != nil {
		return nil, err
	}

	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	go func() {
		ta := &tarAppender{
			TarWriter: tar.NewWriter(compressWriter),
			Buffer:    pools.BufioWriter32KPool.Get(nil),
			SeenFiles: make(map[uint64]string),
		}
		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		if options.IncludeFiles == nil {
			options.IncludeFiles = []string{"."}
		}

		seen := make(map[string]bool)

		var renamedRelFilePath string // For when TarOptions.Name is set
		for _, include := range options.IncludeFiles {
			filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the root path. Skip in both situations.
					return nil
				}

				skip := false

				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
					if err != nil {
						logrus.Debugf("Error matching %s: %v", relFilePath, err)
						return err
					}
				}

				if skip {
					if !exceptions && f.IsDir() {
						return filepath.SkipDir
					}
					return nil
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource
				if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
					renamedRelFilePath = relFilePath
				}
				// Set this to make sure the items underneath also get renamed
				if options.Name != "" {
					relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
				}
				return nil
			})
		}

		// Make sure to check the error on Close.
		if err := ta.TarWriter.Close(); err != nil {
			logrus.Debugf("Can't close tar writer: %s", err)
		}
		if err := compressWriter.Close(); err != nil {
			logrus.Debugf("Can't close compress writer: %s", err)
		}
		if err := pipeWriter.Close(); err != nil {
			logrus.Debugf("Can't close pipe writer: %s", err)
		}
	}()

	return pipeReader, nil
}
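
// Usage sketch (hypothetical paths and patterns): stream a directory as a
// gzipped tar while excluding part of its contents.
//
//	rdr, err := TarWithOptions("/var/lib/app", &TarOptions{
//		Compression:     Gzip,
//		ExcludePatterns: []string{"tmp", "*.log"},
//	})
//	if err != nil {
//		return err
//	}
//	defer rdr.Close()
//	// rdr can now be copied to a file, sent over HTTP, fed to Untar, etc.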

func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
	tr := tar.NewReader(decompressedArchive)
	trBuf := pools.BufioReader32KPool.Get(nil)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header

	// Iterate through the files in the archive.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		// This keeps "../" as-is, but normalizes "/../" to "/"
		hdr.Name = filepath.Clean(hdr.Name)

		for _, exclude := range options.ExcludePatterns {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		if !strings.HasSuffix(hdr.Name, "/") {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = os.MkdirAll(parentPath, 0777)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return err
		}
		if strings.HasPrefix(rel, "../") {
			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}

		// If path exists we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if fi.IsDir() && hdr.Name == "." {
				continue
			}
			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		trBuf.Reset(tr)
		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
		if err := syscall.UtimesNano(path, ts); err != nil {
			return err
		}
	}
	return nil
}

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(archive io.Reader, dest string, options *TarOptions) error {
	if archive == nil {
		return fmt.Errorf("Empty archive")
	}
	dest = filepath.Clean(dest)
	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}
	decompressedArchive, err := DecompressStream(archive)
	if err != nil {
		return err
	}
	defer decompressedArchive.Close()
	return Unpack(decompressedArchive, dest, options)
}
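
// Usage sketch (hypothetical paths): unpack an archive file into a directory.
// The compression is detected automatically via DecompressStream.
//
//	f, err := os.Open("/tmp/rootfs.tar.xz") // hypothetical path
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := Untar(f, "/var/lib/app/rootfs", &TarOptions{NoLchown: true}); err != nil {
//		return err
//	}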

func (archiver *Archiver) TarUntar(src, dst string) error {
	logrus.Debugf("TarUntar(%s %s)", src, dst)
	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
	if err != nil {
		return err
	}
	defer archive.Close()
	return archiver.Untar(archive, dst, nil)
}

// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func TarUntar(src, dst string) error {
	return defaultArchiver.TarUntar(src, dst)
}

func (archiver *Archiver) UntarPath(src, dst string) error {
	archive, err := os.Open(src)
	if err != nil {
		return err
	}
	defer archive.Close()
	if err := archiver.Untar(archive, dst, nil); err != nil {
		return err
	}
	return nil
}

// UntarPath is a convenience function which looks for an archive
// at filesystem path `src`, and unpacks it at `dst`.
func UntarPath(src, dst string) error {
	return defaultArchiver.UntarPath(src, dst)
}

func (archiver *Archiver) CopyWithTar(src, dst string) error {
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}
	if !srcSt.IsDir() {
		return archiver.CopyFileWithTar(src, dst)
	}
	// Create dst, copy src's content into it
	logrus.Debugf("Creating dest directory: %s", dst)
	if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
		return err
	}
	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
	return archiver.TarUntar(src, dst)
}

// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func CopyWithTar(src, dst string) error {
	return defaultArchiver.CopyWithTar(src, dst)
}

func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}
	if srcSt.IsDir() {
		return fmt.Errorf("Can't copy a directory")
	}
	// Clean up the trailing /
	if dst[len(dst)-1] == '/' {
		dst = path.Join(dst, filepath.Base(src))
	}
	// Create the holding directory if necessary
	if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
		return err
	}

	r, w := io.Pipe()
	errC := promise.Go(func() error {
		defer w.Close()

		srcF, err := os.Open(src)
		if err != nil {
			return err
		}
		defer srcF.Close()

		hdr, err := tar.FileInfoHeader(srcSt, "")
		if err != nil {
			return err
		}
		hdr.Name = filepath.Base(dst)
		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

		tw := tar.NewWriter(w)
		defer tw.Close()
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(tw, srcF); err != nil {
			return err
		}
		return nil
	})
	defer func() {
		// Surface the tar-writing goroutine's error, but do not overwrite an
		// error already returned by Untar with a nil value.
		if er := <-errC; err == nil && er != nil {
			err = er
		}
	}()
	return archiver.Untar(r, filepath.Dir(dst), nil)
}

// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// If `dst` ends with a trailing slash '/', the final destination path
// will be `dst/base(src)`.
func CopyFileWithTar(src, dst string) (err error) {
	return defaultArchiver.CopyFileWithTar(src, dst)
}
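
// Usage sketch (hypothetical paths, error handling omitted): the trailing
// slash decides whether `dst` names the copy itself or the directory the
// copy is placed in.
//
//	CopyFileWithTar("/etc/hosts", "/tmp/backup/")      // copied to /tmp/backup/hosts
//	CopyFileWithTar("/etc/hosts", "/tmp/backup/hosts") // same destination, spelled out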

// CmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
	if input != nil {
		stdin, err := cmd.StdinPipe()
		if err != nil {
			return nil, err
		}
		// Write stdin if any
		go func() {
			io.Copy(stdin, input)
			stdin.Close()
		}()
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	pipeR, pipeW := io.Pipe()
	errChan := make(chan []byte)
	// Collect stderr, we will use it in case of an error
	go func() {
		errText, e := ioutil.ReadAll(stderr)
		if e != nil {
			errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
		}
		errChan <- errText
	}()
	// Copy stdout to the returned pipe
	go func() {
		_, err := io.Copy(pipeW, stdout)
		if err != nil {
			pipeW.CloseWithError(err)
		}
		errText := <-errChan
		if err := cmd.Wait(); err != nil {
			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
		} else {
			pipeW.Close()
		}
	}()
	// Run the command and return the pipe
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	return pipeR, nil
}
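
// Usage sketch (hypothetical command and readers): this is the same pattern
// xzDecompress uses above. If the command exits with an error, that error
// (including anything written to stderr) surfaces on the next read from the
// returned pipe.
//
//	out, err := CmdStream(exec.Command("gzip", "-d", "-c"), compressedReader)
//	if err != nil {
//		return err
//	}
//	defer out.Close()
//	io.Copy(dst, out) // dst is any io.Writer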

// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
	f, err := ioutil.TempFile(dir, "")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(f, src); err != nil {
		return nil, err
	}
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	st, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := st.Size()
	return &TempArchive{File: f, Size: size}, nil
}

type TempArchive struct {
	*os.File
	Size   int64 // Pre-computed from Stat().Size() as a convenience
	read   int64
	closed bool
}

// Close closes the underlying file if it's still open, or does a no-op
// to allow callers to try to close the TempArchive multiple times safely.
func (archive *TempArchive) Close() error {
	if archive.closed {
		return nil
	}

	archive.closed = true

	return archive.File.Close()
}

func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	archive.read += int64(n)
	if err != nil || archive.read == archive.Size {
		archive.Close()
		os.Remove(archive.File.Name())
	}
	return n, err
}
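
// Usage sketch (hypothetical reader and writer): because TempArchive removes
// its backing file once it has been read in full, it suits one-shot uses such
// as buffering an export to disk before streaming it out.
//
//	tmp, err := NewTempArchive(rdr, "") // rdr is any Archive; "" uses the default temp dir
//	if err != nil {
//		return err
//	}
//	io.Copy(w, tmp) // the temp file is removed once tmp.Size bytes have been read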