archive.go
package archive

import (
	"bufio"
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/promise"
	"github.com/docker/docker/pkg/system"
)

type (
	Archive       io.ReadCloser
	ArchiveReader io.Reader
	Compression   int
	TarOptions    struct {
		IncludeFiles    []string
		ExcludePatterns []string
		Compression     Compression
		NoLchown        bool
		Name            string
	}

	// Archiver allows the reuse of most utility functions of this package
	// with a pluggable Untar function.
	Archiver struct {
		Untar func(io.Reader, string, *TarOptions) error
	}

	// breakoutError is used to differentiate errors related to breaking out
	// of the destination directory. When testing archive breakout in the
	// unit tests, this error is expected in order for the test to pass.
	breakoutError error
)
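
// Usage sketch (illustrative only, not part of the original file): an Archiver
// lets a caller swap in its own Untar implementation while still reusing
// TarUntar, UntarPath, CopyWithTar and CopyFileWithTar. The logging wrapper
// and the paths below are hypothetical.
//
//	loggingUntar := func(archive io.Reader, dest string, options *TarOptions) error {
//		log.Debugf("untarring into %s", dest)
//		return Untar(archive, dest, options)
//	}
//	archiver := &Archiver{Untar: loggingUntar}
//	if err := archiver.TarUntar("/src/dir", "/dst/dir"); err != nil {
//		log.Errorf("copy failed: %s", err)
//	}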

var (
	ErrNotImplemented = errors.New("Function not implemented")

	defaultArchiver = &Archiver{Untar}
)

const (
	Uncompressed Compression = iota
	Bzip2
	Gzip
	Xz
)

func IsArchive(header []byte) bool {
	compression := DetectCompression(header)
	if compression != Uncompressed {
		return true
	}
	r := tar.NewReader(bytes.NewBuffer(header))
	_, err := r.Next()
	return err == nil
}

func DetectCompression(source []byte) Compression {
	for compression, m := range map[Compression][]byte{
		Bzip2: {0x42, 0x5A, 0x68},
		Gzip:  {0x1F, 0x8B, 0x08},
		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
	} {
		if len(source) < len(m) {
			log.Debugf("Len too short")
			continue
		}
		if bytes.Compare(m, source[:len(m)]) == 0 {
			return compression
		}
	}
	return Uncompressed
}
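
// Usage sketch (illustrative only): DetectCompression only needs the first few
// bytes of a stream, so a caller that has already read a header can classify
// it without rewinding. The byte literal below is the gzip magic from the
// table above, padded with an arbitrary fourth byte.
//
//	header := []byte{0x1F, 0x8B, 0x08, 0x00}
//	if DetectCompression(header) == Gzip {
//		// treat the stream as a gzipped tar
//	}
//	if IsArchive(header) {
//		// compressed data or a plain tar header
//	}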

func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
	args := []string{"xz", "-d", "-c", "-q"}
	return CmdStream(exec.Command(args[0], args[1:]...), archive)
}

func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	p := pools.BufioReader32KPool
	buf := p.Get(archive)
	bs, err := buf.Peek(10)
	if err != nil {
		return nil, err
	}

	compression := DetectCompression(bs)
	switch compression {
	case Uncompressed:
		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
		return readBufWrapper, nil
	case Gzip:
		gzReader, err := gzip.NewReader(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
		return readBufWrapper, nil
	case Bzip2:
		bz2Reader := bzip2.NewReader(buf)
		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
		return readBufWrapper, nil
	case Xz:
		xzReader, err := xzDecompress(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
		return readBufWrapper, nil
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}

func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
	p := pools.BufioWriter32KPool
	buf := p.Get(dest)
	switch compression {
	case Uncompressed:
		writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
		return writeBufWrapper, nil
	case Gzip:
		gzWriter := gzip.NewWriter(dest)
		writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
		return writeBufWrapper, nil
	case Bzip2, Xz:
		// archive/bzip2 does not support writing, and there is no xz support at all
		// However, this is not a problem as docker only currently generates gzipped tars
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}
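
// Usage sketch (illustrative only): CompressStream and DecompressStream are
// symmetric wrappers around an underlying writer/reader, so a gzip round trip
// through a file looks like the following. The path is hypothetical and error
// handling is elided.
//
//	f, _ := os.Create("/tmp/data.tar.gz")
//	w, _ := CompressStream(f, Gzip)
//	w.Write(payload)
//	w.Close()
//	f.Close()
//
//	f, _ = os.Open("/tmp/data.tar.gz")
//	r, _ := DecompressStream(f)
//	restored, _ := ioutil.ReadAll(r)
//	r.Close()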

func (compression *Compression) Extension() string {
	switch *compression {
	case Uncompressed:
		return "tar"
	case Bzip2:
		return "tar.bz2"
	case Gzip:
		return "tar.gz"
	case Xz:
		return "tar.xz"
	}
	return ""
}

type tarAppender struct {
	TarWriter *tar.Writer
	Buffer    *bufio.Writer

	// for hardlink mapping
	SeenFiles map[uint64]string
}

// canonicalTarName provides a platform-independent and consistent posix-style
// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) {
	name, err := CanonicalTarNameForPath(name)
	if err != nil {
		return "", err
	}

	// suffix with '/' for directories
	if isDir && !strings.HasSuffix(name, "/") {
		name += "/"
	}
	return name, nil
}

func (ta *tarAppender) addTarFile(path, name string) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	link := ""
	if fi.Mode()&os.ModeSymlink != 0 {
		if link, err = os.Readlink(path); err != nil {
			return err
		}
	}

	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return err
	}
	hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

	name, err = canonicalTarName(name, fi.IsDir())
	if err != nil {
		return fmt.Errorf("tar: cannot canonicalize path: %v", err)
	}
	hdr.Name = name

	nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
	if err != nil {
		return err
	}

	// if it's a regular file and has more than 1 link,
	// it's hardlinked, so set the type flag accordingly
	if fi.Mode().IsRegular() && nlink > 1 {
		// a link should have a name that it links to
		// and that linked name should be first in the tar archive
		if oldpath, ok := ta.SeenFiles[inode]; ok {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = oldpath
			hdr.Size = 0 // This must be here for the writer math to add up!
		} else {
			ta.SeenFiles[inode] = name
		}
	}

	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability)
	}

	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
		return err
	}

	if hdr.Typeflag == tar.TypeReg {
		file, err := os.Open(path)
		if err != nil {
			return err
		}

		ta.Buffer.Reset(ta.TarWriter)
		defer ta.Buffer.Reset(nil)
		_, err = io.Copy(ta.Buffer, file)
		file.Close()
		if err != nil {
			return err
		}
		err = ta.Buffer.Flush()
		if err != nil {
			return err
		}
	}

	return nil
}

func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error {
	// hdr.Mode is in linux format, which we can use for syscalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file
		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
		mode := uint32(hdr.Mode & 07777)
		switch hdr.Typeflag {
		case tar.TypeBlock:
			mode |= syscall.S_IFBLK
		case tar.TypeChar:
			mode |= syscall.S_IFCHR
		case tar.TypeFifo:
			mode |= syscall.S_IFIFO
		}

		if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
			return err
		}

	case tar.TypeLink:
		targetPath := filepath.Join(extractDir, hdr.Linkname)
		// check for hardlink breakout
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
		}
		if err := os.Link(targetPath, path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		// path -> hdr.Linkname = targetPath
		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)

		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
		// that symlink would first have to be created, which would be caught earlier, at this very check:
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
		}
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		log.Debugf("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
	}

	if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown {
		return err
	}

	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
			return err
		}
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if hdr.Typeflag != tar.TypeSymlink {
		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
			return err
		}
	}

	ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
	// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, so symlinks
	// have to go through LUtimesNano instead
	if hdr.Typeflag != tar.TypeSymlink {
		if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	} else {
		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}

// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
func Tar(path string, compression Compression) (io.ReadCloser, error) {
	return TarWithOptions(path, &TarOptions{Compression: compression})
}
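
// Usage sketch (illustrative only): Tar returns a ReadCloser backed by a pipe,
// so the archive is produced lazily as it is consumed. The paths below are
// hypothetical.
//
//	rc, err := Tar("/var/lib/app/data", Gzip)
//	if err != nil {
//		return err
//	}
//	defer rc.Close()
//	out, err := os.Create("/tmp/data.tar.gz")
//	if err != nil {
//		return err
//	}
//	defer out.Close()
//	_, err = io.Copy(out, rc)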

func escapeName(name string) string {
	escaped := make([]byte, 0)
	for i, c := range []byte(name) {
		if i == 0 && c == '/' {
			continue
		}
		// all printable chars except "-" which is 0x2d
		if (0x20 <= c && c <= 0x7E) && c != 0x2d {
			escaped = append(escaped, c)
		} else {
			escaped = append(escaped, fmt.Sprintf("\\%03o", c)...)
		}
	}
	return string(escaped)
}

// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	go func() {
		ta := &tarAppender{
			TarWriter: tar.NewWriter(compressWriter),
			Buffer:    pools.BufioWriter32KPool.Get(nil),
			SeenFiles: make(map[uint64]string),
		}
		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		if options.IncludeFiles == nil {
			options.IncludeFiles = []string{"."}
		}

		seen := make(map[string]bool)

		var renamedRelFilePath string // For when tar.Options.Name is set
		for _, include := range options.IncludeFiles {
			filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					log.Debugf("Tar: Can't stat file %s to tar: %s", filePath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the root path. Skip in both situations.
					return nil
				}

				skip := false

				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = fileutils.Matches(relFilePath, options.ExcludePatterns)
					if err != nil {
						log.Debugf("Error matching %s: %s", relFilePath, err)
						return err
					}
				}

				if skip {
					if f.IsDir() {
						return filepath.SkipDir
					}
					return nil
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource
				if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
					renamedRelFilePath = relFilePath
				}
				// Set this to make sure the items underneath also get renamed
				if options.Name != "" {
					relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					log.Debugf("Can't add file %s to tar: %s", filePath, err)
				}
				return nil
			})
		}

		// Make sure to check the error on Close.
		if err := ta.TarWriter.Close(); err != nil {
			log.Debugf("Can't close tar writer: %s", err)
		}
		if err := compressWriter.Close(); err != nil {
			log.Debugf("Can't close compress writer: %s", err)
		}
		if err := pipeWriter.Close(); err != nil {
			log.Debugf("Can't close pipe writer: %s", err)
		}
	}()

	return pipeReader, nil
}
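
// Usage sketch (illustrative only): TarWithOptions drives the walk above.
// IncludeFiles restricts the walk to the listed relative paths,
// ExcludePatterns is consulted for everything that is not an exact include
// match, and Name rewrites the base resource inside the archive. The paths
// and patterns below are hypothetical.
//
//	rc, err := TarWithOptions("/build/context", &TarOptions{
//		Compression:     Uncompressed,
//		IncludeFiles:    []string{"."},
//		ExcludePatterns: []string{"node_modules", "*.log"},
//	})
//	if err != nil {
//		return err
//	}
//	defer rc.Close()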

func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
	tr := tar.NewReader(decompressedArchive)
	trBuf := pools.BufioReader32KPool.Get(nil)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header

	// Iterate through the files in the archive.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		// This keeps "../" as-is, but normalizes "/../" to "/"
		hdr.Name = filepath.Clean(hdr.Name)

		for _, exclude := range options.ExcludePatterns {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		if !strings.HasSuffix(hdr.Name, "/") {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = os.MkdirAll(parentPath, 0777)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return err
		}
		if strings.HasPrefix(rel, "../") {
			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}

		// If path exists we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if fi.IsDir() && hdr.Name == "." {
				continue
			}
			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		trBuf.Reset(tr)
		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
		if err := syscall.UtimesNano(path, ts); err != nil {
			return err
		}
	}
	return nil
}

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(archive io.Reader, dest string, options *TarOptions) error {
	if archive == nil {
		return fmt.Errorf("Empty archive")
	}
	dest = filepath.Clean(dest)
	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}
	decompressedArchive, err := DecompressStream(archive)
	if err != nil {
		return err
	}
	defer decompressedArchive.Close()
	return Unpack(decompressedArchive, dest, options)
}
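
// Usage sketch (illustrative only): Untar accepts either a plain or a
// compressed tar stream, since the compression is detected internally.
// Unpacking an in-memory archive into a hypothetical directory:
//
//	var buf bytes.Buffer // filled with tar (or tar.gz, tar.bz2, tar.xz) data
//	if err := Untar(&buf, "/tmp/extracted", &TarOptions{NoLchown: true}); err != nil {
//		return err
//	}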

func (archiver *Archiver) TarUntar(src, dst string) error {
	log.Debugf("TarUntar(%s %s)", src, dst)
	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
	if err != nil {
		return err
	}
	defer archive.Close()
	return archiver.Untar(archive, dst, nil)
}

// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func TarUntar(src, dst string) error {
	return defaultArchiver.TarUntar(src, dst)
}

func (archiver *Archiver) UntarPath(src, dst string) error {
	archive, err := os.Open(src)
	if err != nil {
		return err
	}
	defer archive.Close()
	if err := archiver.Untar(archive, dst, nil); err != nil {
		return err
	}
	return nil
}

// UntarPath is a convenience function which looks for an archive
// at filesystem path `src`, and unpacks it at `dst`.
func UntarPath(src, dst string) error {
	return defaultArchiver.UntarPath(src, dst)
}

func (archiver *Archiver) CopyWithTar(src, dst string) error {
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}
	if !srcSt.IsDir() {
		return archiver.CopyFileWithTar(src, dst)
	}
	// Create dst, copy src's content into it
	log.Debugf("Creating dest directory: %s", dst)
	if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
		return err
	}
	log.Debugf("Calling TarUntar(%s, %s)", src, dst)
	return archiver.TarUntar(src, dst)
}

// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func CopyWithTar(src, dst string) error {
	return defaultArchiver.CopyWithTar(src, dst)
}
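
// Usage sketch (illustrative only): CopyWithTar behaves like a recursive copy
// that preserves metadata by piping Tar into Untar. The paths below are
// hypothetical.
//
//	// copies the *contents* of /data/src into /data/dst,
//	// creating /data/dst if it does not exist
//	if err := CopyWithTar("/data/src", "/data/dst"); err != nil {
//		return err
//	}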

func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
	log.Debugf("CopyFileWithTar(%s, %s)", src, dst)
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}
	if srcSt.IsDir() {
		return fmt.Errorf("Can't copy a directory")
	}
	// Clean up the trailing /
	if dst[len(dst)-1] == '/' {
		dst = path.Join(dst, filepath.Base(src))
	}
	// Create the holding directory if necessary
	if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
		return err
	}

	r, w := io.Pipe()
	errC := promise.Go(func() error {
		defer w.Close()

		srcF, err := os.Open(src)
		if err != nil {
			return err
		}
		defer srcF.Close()

		hdr, err := tar.FileInfoHeader(srcSt, "")
		if err != nil {
			return err
		}
		hdr.Name = filepath.Base(dst)
		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

		tw := tar.NewWriter(w)
		defer tw.Close()
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(tw, srcF); err != nil {
			return err
		}
		return nil
	})
	defer func() {
		if er := <-errC; er != nil {
			err = er
		}
	}()
	return archiver.Untar(r, filepath.Dir(dst), nil)
}

// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// If `dst` ends with a trailing slash '/', the final destination path
// will be `dst/base(src)`.
func CopyFileWithTar(src, dst string) (err error) {
	return defaultArchiver.CopyFileWithTar(src, dst)
}
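
// Usage sketch (illustrative only): because CopyFileWithTar keeps the 'cp'
// semantics for a trailing slash, the two calls below write the same
// hypothetical file.
//
//	CopyFileWithTar("/etc/hosts", "/backup/hosts") // explicit destination name
//	CopyFileWithTar("/etc/hosts", "/backup/")      // becomes /backup/hosts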

// CmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
	if input != nil {
		stdin, err := cmd.StdinPipe()
		if err != nil {
			return nil, err
		}
		// Write stdin if any
		go func() {
			io.Copy(stdin, input)
			stdin.Close()
		}()
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	pipeR, pipeW := io.Pipe()
	errChan := make(chan []byte)
	// Collect stderr, we will use it in case of an error
	go func() {
		errText, e := ioutil.ReadAll(stderr)
		if e != nil {
			errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
		}
		errChan <- errText
	}()
	// Copy stdout to the returned pipe
	go func() {
		_, err := io.Copy(pipeW, stdout)
		if err != nil {
			pipeW.CloseWithError(err)
		}
		errText := <-errChan
		if err := cmd.Wait(); err != nil {
			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
		} else {
			pipeW.Close()
		}
	}()
	// Run the command and return the pipe
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	return pipeR, nil
}
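
// Usage sketch (illustrative only): xzDecompress above is the in-package user
// of CmdStream; the same pattern works for any filter-style command. The
// command below assumes gzip is installed on the host, and compressedReader
// is a hypothetical io.Reader.
//
//	out, err := CmdStream(exec.Command("gzip", "-d", "-c"), compressedReader)
//	if err != nil {
//		return err
//	}
//	defer out.Close()
//	// read decompressed data from out; a non-zero exit status surfaces as a
//	// read error that includes the command's stderr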

// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
	f, err := ioutil.TempFile(dir, "")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(f, src); err != nil {
		return nil, err
	}
	if err = f.Sync(); err != nil {
		return nil, err
	}
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	st, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := st.Size()
	return &TempArchive{File: f, Size: size}, nil
}
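
// Usage sketch (illustrative only): NewTempArchive is useful when a stream
// must be sized before it is sent, e.g. to set a Content-Length header. The
// directory and the dst writer below are hypothetical.
//
//	tmp, err := NewTempArchive(archive, "/var/tmp")
//	if err != nil {
//		return err
//	}
//	log.Debugf("archive is %d bytes", tmp.Size)
//	io.Copy(dst, tmp) // the backing file is removed once fully read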

type TempArchive struct {
	*os.File
	Size   int64 // Pre-computed from Stat().Size() as a convenience
	read   int64
	closed bool
}

// Close closes the underlying file if it's still open, or does a no-op
// to allow callers to try to close the TempArchive multiple times safely.
func (archive *TempArchive) Close() error {
	if archive.closed {
		return nil
	}

	archive.closed = true

	return archive.File.Close()
}

func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	archive.read += int64(n)
	if err != nil || archive.read == archive.Size {
		archive.Close()
		os.Remove(archive.File.Name())
	}
	return n, err
}