package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"bufio"
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/system"
	"github.com/sirupsen/logrus"
	exec "golang.org/x/sys/execabs"
)

type (
	// Compression is the state representing whether an archive is compressed or not.
	Compression int
	// WhiteoutFormat is the format of whiteouts unpacked
	WhiteoutFormat int

	// TarOptions wraps the tar options.
	TarOptions struct {
		IncludeFiles     []string
		ExcludePatterns  []string
		Compression      Compression
		NoLchown         bool
		UIDMaps          []idtools.IDMap
		GIDMaps          []idtools.IDMap
		ChownOpts        *idtools.Identity
		IncludeSourceDir bool
		// WhiteoutFormat is the expected on disk format for whiteout files.
		// This format will be converted to the standard format on pack
		// and from the standard format on unpack.
		WhiteoutFormat WhiteoutFormat
		// When unpacking, specifies whether overwriting a directory with a
		// non-directory is allowed and vice versa.
		NoOverwriteDirNonDir bool
		// For each include when creating an archive, the included name will be
		// replaced with the matching name from this map.
		RebaseNames map[string]string
		InUserNS    bool
	}
)

// Archiver implements the Archiver interface and allows the reuse of most utility functions of
// this package with a pluggable Untar function. Also, to facilitate the passing of specific id
// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
type Archiver struct {
	Untar     func(io.Reader, string, *TarOptions) error
	IDMapping *idtools.IdentityMapping
}

// NewDefaultArchiver returns a new Archiver without any IdentityMapping
func NewDefaultArchiver() *Archiver {
	return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}}
}

// breakoutError is used to differentiate errors related to breaking out.
// When testing archive breakout in the unit tests, this error is expected
// in order for the test to pass.
type breakoutError error

const (
	// Uncompressed represents the uncompressed state.
	Uncompressed Compression = iota
	// Bzip2 is bzip2 compression algorithm.
	Bzip2
	// Gzip is gzip compression algorithm.
	Gzip
	// Xz is xz compression algorithm.
	Xz
)

const (
	// AUFSWhiteoutFormat is the default format for whiteouts
	AUFSWhiteoutFormat WhiteoutFormat = iota
	// OverlayWhiteoutFormat formats whiteout according to the overlay
	// standard.
	OverlayWhiteoutFormat
)

const (
	modeISDIR  = 040000  // Directory
	modeISFIFO = 010000  // FIFO
	modeISREG  = 0100000 // Regular file
	modeISLNK  = 0120000 // Symbolic link
	modeISBLK  = 060000  // Block special file
	modeISCHR  = 020000  // Character special file
	modeISSOCK = 0140000 // Socket
)

// IsArchivePath checks if the (possibly compressed) file at the given path
// starts with a tar file header.
func IsArchivePath(path string) bool {
	file, err := os.Open(path)
	if err != nil {
		return false
	}
	defer file.Close()
	rdr, err := DecompressStream(file)
	if err != nil {
		return false
	}
	defer rdr.Close()
	r := tar.NewReader(rdr)
	_, err = r.Next()
	return err == nil
}

// DetectCompression detects the compression algorithm of the source.
func DetectCompression(source []byte) Compression {
	for compression, m := range map[Compression][]byte{
		Bzip2: {0x42, 0x5A, 0x68},
		Gzip:  {0x1F, 0x8B, 0x08},
		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
	} {
		if len(source) < len(m) {
			continue
		}
		if bytes.Equal(m, source[:len(m)]) {
			return compression
		}
	}
	return Uncompressed
}
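
// exampleDetectCompression is an illustrative usage sketch added for this
// write-up; it is not part of the upstream file. It reads the first few bytes
// of r (consuming them) and reports the compression format DetectCompression
// sees in them.
func exampleDetectCompression(r io.Reader) Compression {
	head := make([]byte, 10)
	// DetectCompression tolerates inputs shorter than the longest magic
	// number, so a short read is fine and the error can be ignored here.
	n, _ := io.ReadFull(r, head)
	return DetectCompression(head[:n])
}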

func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) {
	args := []string{"xz", "-d", "-c", "-q"}
	return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive)
}

func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
	noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ")
	var noPigz bool
	if noPigzEnv != "" {
		var err error
		noPigz, err = strconv.ParseBool(noPigzEnv)
		if err != nil {
			logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var")
		}
	}
	if noPigz {
		logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv)
		return gzip.NewReader(buf)
	}

	unpigzPath, err := exec.LookPath("unpigz")
	if err != nil {
		logrus.Debugf("unpigz binary not found, falling back to go gzip library")
		return gzip.NewReader(buf)
	}

	logrus.Debugf("Using %s to decompress", unpigzPath)
	return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
}

func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser {
	return ioutils.NewReadCloserWrapper(readBuf, func() error {
		cancel()
		return readBuf.Close()
	})
}

// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	p := pools.BufioReader32KPool
	buf := p.Get(archive)
	bs, err := buf.Peek(10)
	if err != nil && err != io.EOF {
		// Note: we'll ignore any io.EOF error because there are some odd
		// cases where the layer.tar file will be empty (zero bytes) and
		// that results in an io.EOF from the Peek() call. So, in those
		// cases we'll just treat it as a non-compressed stream and
		// that means just create an empty layer.
		// See Issue 18170
		return nil, err
	}

	compression := DetectCompression(bs)
	switch compression {
	case Uncompressed:
		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
		return readBufWrapper, nil
	case Gzip:
		ctx, cancel := context.WithCancel(context.Background())

		gzReader, err := gzDecompress(ctx, buf)
		if err != nil {
			cancel()
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
		return wrapReadCloser(readBufWrapper, cancel), nil
	case Bzip2:
		bz2Reader := bzip2.NewReader(buf)
		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
		return readBufWrapper, nil
	case Xz:
		ctx, cancel := context.WithCancel(context.Background())

		xzReader, err := xzDecompress(ctx, buf)
		if err != nil {
			cancel()
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
		return wrapReadCloser(readBufWrapper, cancel), nil
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}

// CompressStream compresses dest with the specified compression algorithm.
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
	p := pools.BufioWriter32KPool
	buf := p.Get(dest)
	switch compression {
	case Uncompressed:
		writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
		return writeBufWrapper, nil
	case Gzip:
		gzWriter := gzip.NewWriter(dest)
		writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
		return writeBufWrapper, nil
	case Bzip2, Xz:
		// archive/bzip2 does not support writing, and there is no xz support at all
		// However, this is not a problem as docker only currently generates gzipped tars
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}
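
// exampleGzipRoundTrip is an illustrative sketch, not part of the upstream
// file: it compresses data with CompressStream using Gzip, then feeds the
// result back through DecompressStream (which auto-detects the format) and
// returns the recovered bytes.
func exampleGzipRoundTrip(data []byte) ([]byte, error) {
	var compressed bytes.Buffer
	w, err := CompressStream(&compressed, Gzip)
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(data); err != nil {
		return nil, err
	}
	// Close flushes the buffered writer and finalizes the gzip stream.
	if err := w.Close(); err != nil {
		return nil, err
	}

	r, err := DecompressStream(&compressed)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}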

// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
// modify the contents or header of an entry in the archive. If the file already
// exists in the archive the TarModifierFunc will be called with the Header and
// a reader which will return the file's content. If the file does not exist both
// header and content will be nil.
type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)

// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
// tar stream are modified if they match any of the keys in mods.
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
	pipeReader, pipeWriter := io.Pipe()

	go func() {
		tarReader := tar.NewReader(inputTarStream)
		tarWriter := tar.NewWriter(pipeWriter)
		defer inputTarStream.Close()
		defer tarWriter.Close()

		modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
			header, data, err := modifier(name, original, tarReader)
			switch {
			case err != nil:
				return err
			case header == nil:
				return nil
			}

			if header.Name == "" {
				header.Name = name
			}
			header.Size = int64(len(data))
			if err := tarWriter.WriteHeader(header); err != nil {
				return err
			}
			if len(data) != 0 {
				if _, err := tarWriter.Write(data); err != nil {
					return err
				}
			}
			return nil
		}

		var err error
		var originalHeader *tar.Header
		for {
			originalHeader, err = tarReader.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				pipeWriter.CloseWithError(err)
				return
			}

			modifier, ok := mods[originalHeader.Name]
			if !ok {
				// No modifiers for this file, copy the header and data
				if err := tarWriter.WriteHeader(originalHeader); err != nil {
					pipeWriter.CloseWithError(err)
					return
				}
				if _, err := pools.Copy(tarWriter, tarReader); err != nil {
					pipeWriter.CloseWithError(err)
					return
				}
				continue
			}
			delete(mods, originalHeader.Name)

			if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
		}

		// Apply the modifiers that haven't matched any files in the archive
		for name, modifier := range mods {
			if err := modify(name, nil, modifier, nil); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
		}

		pipeWriter.Close()
	}()
	return pipeReader
}
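
// exampleReplaceVersionFile is an illustrative sketch, not part of the
// upstream file; the "VERSION" entry name and the 0644 mode are hypothetical.
// It shows how a TarModifierFunc can rewrite one entry of a tar stream,
// creating the entry when the archive does not already contain it.
func exampleReplaceVersionFile(in io.ReadCloser, version string) io.ReadCloser {
	return ReplaceFileTarWrapper(in, map[string]TarModifierFunc{
		"VERSION": func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
			if header == nil {
				// The entry was missing from the archive, so create it.
				header = &tar.Header{Name: path, Mode: 0644, Typeflag: tar.TypeReg}
			}
			// ReplaceFileTarWrapper sets header.Size from the returned data.
			return header, []byte(version), nil
		},
	})
}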

// Extension returns the extension of a file that uses the specified compression algorithm.
func (compression *Compression) Extension() string {
	switch *compression {
	case Uncompressed:
		return "tar"
	case Bzip2:
		return "tar.bz2"
	case Gzip:
		return "tar.gz"
	case Xz:
		return "tar.xz"
	}
	return ""
}

// FileInfoHeader creates a populated Header from fi.
// Compared to the archive/tar package, this function fills in more information.
// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
// which have been deleted since Go 1.9 archive/tar.
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return nil, err
	}
	hdr.Format = tar.FormatPAX
	hdr.ModTime = hdr.ModTime.Truncate(time.Second)
	hdr.AccessTime = time.Time{}
	hdr.ChangeTime = time.Time{}
	hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
	hdr.Name = canonicalTarName(name, fi.IsDir())
	if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
		return nil, err
	}
	return hdr, nil
}

// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar
// https://github.com/golang/go/commit/66b5a2f
func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
	fm := fi.Mode()
	switch {
	case fm.IsRegular():
		mode |= modeISREG
	case fi.IsDir():
		mode |= modeISDIR
	case fm&os.ModeSymlink != 0:
		mode |= modeISLNK
	case fm&os.ModeDevice != 0:
		if fm&os.ModeCharDevice != 0 {
			mode |= modeISCHR
		} else {
			mode |= modeISBLK
		}
	case fm&os.ModeNamedPipe != 0:
		mode |= modeISFIFO
	case fm&os.ModeSocket != 0:
		mode |= modeISSOCK
	}
	return mode
}

// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
// to a tar header
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
	const (
		// Values based on linux/include/uapi/linux/capability.h
		xattrCapsSz2    = 20
		versionOffset   = 3
		vfsCapRevision2 = 2
		vfsCapRevision3 = 3
	)
	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		length := len(capability)
		if capability[versionOffset] == vfsCapRevision3 {
			// Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no
			// sense outside the user namespace the archive is built in.
			capability[versionOffset] = vfsCapRevision2
			length = xattrCapsSz2
		}
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability[:length])
	}
	return nil
}

type tarWhiteoutConverter interface {
	ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
	ConvertRead(*tar.Header, string) (bool, error)
}

type tarAppender struct {
	TarWriter *tar.Writer
	Buffer    *bufio.Writer

	// for hardlink mapping
	SeenFiles       map[uint64]string
	IdentityMapping *idtools.IdentityMapping
	ChownOpts       *idtools.Identity

	// For packing and unpacking whiteout files in the
	// non standard format. The whiteout files defined
	// by the AUFS standard are used as the tar whiteout
	// standard.
	WhiteoutConverter tarWhiteoutConverter
}

func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender {
	return &tarAppender{
		SeenFiles:       make(map[uint64]string),
		TarWriter:       tar.NewWriter(writer),
		Buffer:          pools.BufioWriter32KPool.Get(nil),
		IdentityMapping: idMapping,
		ChownOpts:       chownOpts,
	}
}

// canonicalTarName provides a platform-independent and consistent posix-style
// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) string {
	name = CanonicalTarNameForPath(name)

	// suffix with '/' for directories
	if isDir && !strings.HasSuffix(name, "/") {
		name += "/"
	}
	return name
}

// addTarFile adds to the tar archive a file from `path` as `name`
func (ta *tarAppender) addTarFile(path, name string) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	var link string
	if fi.Mode()&os.ModeSymlink != 0 {
		var err error
		link, err = os.Readlink(path)
		if err != nil {
			return err
		}
	}

	hdr, err := FileInfoHeader(name, fi, link)
	if err != nil {
		return err
	}
	if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
		return err
	}

	// if it's not a directory and has more than 1 link,
	// it's hard linked, so set the type flag accordingly
	if !fi.IsDir() && hasHardlinks(fi) {
		inode, err := getInodeFromStat(fi.Sys())
		if err != nil {
			return err
		}
		// a link should have a name that it links to
		// and that linked name should be first in the tar archive
		if oldpath, ok := ta.SeenFiles[inode]; ok {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = oldpath
			hdr.Size = 0 // This Must be here for the writer math to add up!
		} else {
			ta.SeenFiles[inode] = name
		}
	}

	// check whether the file is an overlayfs whiteout
	// if yes, skip re-mapping container ID mappings.
	isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0

	// handle re-mapping container ID mappings back to host ID mappings before
	// writing tar headers/files. We skip whiteout files because they were written
	// by the kernel and already have proper ownership relative to the host
	if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
		fileIDPair, err := getFileUIDGID(fi.Sys())
		if err != nil {
			return err
		}
		hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair)
		if err != nil {
			return err
		}
	}

	// explicitly override with ChownOpts
	if ta.ChownOpts != nil {
		hdr.Uid = ta.ChownOpts.UID
		hdr.Gid = ta.ChownOpts.GID
	}

	if ta.WhiteoutConverter != nil {
		wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
		if err != nil {
			return err
		}

		// If a new whiteout file exists, write original hdr, then
		// replace hdr with wo to be written after. Whiteouts should
		// always be written after the original. Note the original
		// hdr may have been updated to be a whiteout without returning
		// a whiteout header
		if wo != nil {
			if err := ta.TarWriter.WriteHeader(hdr); err != nil {
				return err
			}
			if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
				return fmt.Errorf("tar: cannot use whiteout for non-empty file")
			}
			hdr = wo
		}
	}

	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
		return err
	}

	if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
		// We use system.OpenSequential to ensure we use sequential file
		// access on Windows to avoid depleting the standby list.
		// On Linux, this equates to a regular os.Open.
		file, err := system.OpenSequential(path)
		if err != nil {
			return err
		}

		ta.Buffer.Reset(ta.TarWriter)
		defer ta.Buffer.Reset(nil)
		_, err = io.Copy(ta.Buffer, file)
		file.Close()
		if err != nil {
			return err
		}
		err = ta.Buffer.Flush()
		if err != nil {
			return err
		}
	}

	return nil
}

func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error {
	// hdr.Mode is in linux format, which we can use for syscalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file. We use system.OpenFileSequential to use sequential
		// file access to avoid depleting the standby list on Windows.
		// On Linux, this equates to a regular os.OpenFile
		file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar:
		if inUserns { // cannot create devices in a userns
			return nil
		}
		// Handle this in an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeFifo:
		// Handle this in an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeLink:
		targetPath := filepath.Join(extractDir, hdr.Linkname)
		// check for hardlink breakout
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
		}
		if err := os.Link(targetPath, path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		// path -> hdr.Linkname = targetPath
		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)

		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
		// that symlink would first have to be created, which would be caught earlier, at this very check:
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
		}
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		logrus.Debug("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
	}

	// Lchown is not supported on Windows.
	if Lchown && runtime.GOOS != "windows" {
		if chownOpts == nil {
			chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}
		}
		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
			return err
		}
	}

	var errors []string
	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
			if err == syscall.ENOTSUP || err == syscall.EPERM {
				// We ignore errors here because not all graphdrivers support
				// xattrs *cough* old versions of AUFS *cough*. However only
				// ENOTSUP should be emitted in that case, otherwise we still
				// bail.
				// EPERM occurs if modifying xattrs is not allowed. This can
				// happen when running in userns with restrictions (ChromeOS).
				errors = append(errors, err.Error())
				continue
			}
			return err
		}
	}

	if len(errors) > 0 {
		logrus.WithFields(logrus.Fields{
			"errors": errors,
		}).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
		return err
	}

	aTime := hdr.AccessTime
	if aTime.Before(hdr.ModTime) {
		// Last access time should never be before last modified time.
		aTime = hdr.ModTime
	}

	// system.Chtimes doesn't support a NOFOLLOW flag atm
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
			return err
		}
	} else {
		ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}

// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
func Tar(path string, compression Compression) (io.ReadCloser, error) {
	return TarWithOptions(path, &TarOptions{Compression: compression})
}

// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
	// Fix the source path to work with long path names. This is a no-op
	// on platforms other than Windows.
	srcPath = fixVolumePathPrefix(srcPath)

	pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
	if err != nil {
		return nil, err
	}

	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
	if err != nil {
		return nil, err
	}

	go func() {
		ta := newTarAppender(
			idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
			compressWriter,
			options.ChownOpts,
		)
		ta.WhiteoutConverter = whiteoutConverter

		defer func() {
			// Make sure to check the error on Close.
			if err := ta.TarWriter.Close(); err != nil {
				logrus.Errorf("Can't close tar writer: %s", err)
			}
			if err := compressWriter.Close(); err != nil {
				logrus.Errorf("Can't close compress writer: %s", err)
			}
			if err := pipeWriter.Close(); err != nil {
				logrus.Errorf("Can't close pipe writer: %s", err)
			}
		}()

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		stat, err := os.Lstat(srcPath)
		if err != nil {
			return
		}

		if !stat.IsDir() {
			// We can't later join a non-dir with any includes because the
			// 'walk' will error if "file/." is stat-ed and "file" is not a
			// directory. So, we must split the source path and use the
			// basename as the include.
			if len(options.IncludeFiles) > 0 {
				logrus.Warn("Tar: Can't archive a file with includes")
			}

			dir, base := SplitPathDirEntry(srcPath)
			srcPath = dir
			options.IncludeFiles = []string{base}
		}

		if len(options.IncludeFiles) == 0 {
			options.IncludeFiles = []string{"."}
		}

		seen := make(map[string]bool)

		for _, include := range options.IncludeFiles {
			rebaseName := options.RebaseNames[include]

			walkRoot := getWalkRoot(srcPath, include)
			filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the source directory path. Skip in both situations.
					return nil
				}

				if options.IncludeSourceDir && include == "." && relFilePath != "." {
					relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
				}

				skip := false

				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = pm.Matches(relFilePath)
					if err != nil {
						logrus.Errorf("Error matching %s: %v", relFilePath, err)
						return err
					}
				}

				if skip {
					// If we want to skip this file and it's a directory
					// then we should first check to see if there's an
					// excludes pattern (e.g. !dir/file) that starts with this
					// dir. If so then we can't skip this dir.

					// If it's not a directory then we can just return/skip it.
					if !f.IsDir() {
						return nil
					}

					// No exceptions (!...) in patterns so just skip dir
					if !pm.Exclusions() {
						return filepath.SkipDir
					}

					dirSlash := relFilePath + string(filepath.Separator)

					for _, pat := range pm.Patterns() {
						if !pat.Exclusion() {
							continue
						}
						if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
							// found a match - so can't skip this dir
							return nil
						}
					}

					// No matching exclusion dir so just skip dir
					return filepath.SkipDir
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource.
				if rebaseName != "" {
					var replacement string
					if rebaseName != string(filepath.Separator) {
						// Special case the root directory to replace with an
						// empty string instead so that we don't end up with
						// double slashes in the paths.
						replacement = rebaseName
					}

					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
					// if pipe is broken, stop writing tar stream to it
					if err == io.ErrClosedPipe {
						return err
					}
				}
				return nil
			})
		}
	}()

	return pipeReader, nil
}
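
// exampleTarDirectory is an illustrative sketch, not part of the upstream
// file; the exclude patterns are hypothetical. It archives srcDir as a
// gzip-compressed tar stream and copies that stream to dest.
func exampleTarDirectory(srcDir string, dest io.Writer) error {
	rc, err := TarWithOptions(srcDir, &TarOptions{
		Compression:     Gzip,
		ExcludePatterns: []string{"*.tmp", ".git"},
	})
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(dest, rc)
	return err
}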

// Unpack unpacks the decompressedArchive to dest with options.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
	tr := tar.NewReader(decompressedArchive)
	trBuf := pools.BufioReader32KPool.Get(nil)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
	rootIDs := idMapping.RootPair()
	whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
	if err != nil {
		return err
	}

	// Iterate through the files in the archive.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// ignore XGlobalHeader early to avoid creating parent directories for them
		if hdr.Typeflag == tar.TypeXGlobalHeader {
			logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name)
			continue
		}

		// Normalize name, for safety and for a simple is-root check
		// This keeps "../" as-is, but normalizes "/../" to "/". Or, on Windows:
		// this keeps "..\" as-is, but normalizes "\..\" to "\".
		hdr.Name = filepath.Clean(hdr.Name)

		for _, exclude := range options.ExcludePatterns {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
		// the filepath format for the OS on which the daemon is running. Hence
		// the check for a slash-suffix MUST be done in an OS-agnostic way.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return err
		}
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}

		// If path exists we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing directory with a non-directory from the archive.
				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
			}

			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing non-directory with a directory from the archive.
				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
			}

			if fi.IsDir() && hdr.Name == "." {
				continue
			}

			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		trBuf.Reset(tr)

		if err := remapIDs(idMapping, hdr); err != nil {
			return err
		}

		if whiteoutConverter != nil {
			writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
			if err != nil {
				return err
			}
			if !writeFile {
				continue
			}
		}

		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)

		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return err
		}
	}
	return nil
}

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
	return untarHandler(tarArchive, dest, options, true)
}
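
// exampleUntarFile is an illustrative sketch, not part of the upstream file:
// it opens a (possibly compressed) tar file on disk and unpacks it into
// destDir, relying on Untar's automatic decompression. NoLchown is set here
// only to show how options are passed.
func exampleUntarFile(tarPath, destDir string) error {
	f, err := os.Open(tarPath)
	if err != nil {
		return err
	}
	defer f.Close()
	return Untar(f, destDir, &TarOptions{NoLchown: true})
}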

// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
	return untarHandler(tarArchive, dest, options, false)
}

// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
	if tarArchive == nil {
		return fmt.Errorf("Empty archive")
	}
	dest = filepath.Clean(dest)
	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}

	r := tarArchive
	if decompress {
		decompressedArchive, err := DecompressStream(tarArchive)
		if err != nil {
			return err
		}
		defer decompressedArchive.Close()
		r = decompressedArchive
	}

	return Unpack(r, dest, options)
}

// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func (archiver *Archiver) TarUntar(src, dst string) error {
	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
	if err != nil {
		return err
	}
	defer archive.Close()
	options := &TarOptions{
		UIDMaps: archiver.IDMapping.UIDs(),
		GIDMaps: archiver.IDMapping.GIDs(),
	}
	return archiver.Untar(archive, dst, options)
}

// UntarPath untars a file from path to a destination, src is the source tar file path.
func (archiver *Archiver) UntarPath(src, dst string) error {
	archive, err := os.Open(src)
	if err != nil {
		return err
	}
	defer archive.Close()
	options := &TarOptions{
		UIDMaps: archiver.IDMapping.UIDs(),
		GIDMaps: archiver.IDMapping.GIDs(),
	}
	return archiver.Untar(archive, dst, options)
}

// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func (archiver *Archiver) CopyWithTar(src, dst string) error {
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}
	if !srcSt.IsDir() {
		return archiver.CopyFileWithTar(src, dst)
	}

	// if this Archiver is set up with ID mapping we need to create
	// the new destination directory with the remapped root UID/GID pair
	// as owner
	rootIDs := archiver.IDMapping.RootPair()
	// Create dst, copy src's content into it
	if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
		return err
	}
	return archiver.TarUntar(src, dst)
}
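
// exampleCopyTree is an illustrative sketch, not part of the upstream file:
// it copies src to dst through an in-memory tar stream using the default
// Archiver. CopyWithTar itself falls back to CopyFileWithTar when src is a
// single file, so no special-casing is needed here.
func exampleCopyTree(src, dst string) error {
	return NewDefaultArchiver().CopyWithTar(src, dst)
}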

// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}

	if srcSt.IsDir() {
		return fmt.Errorf("Can't copy a directory")
	}

	// Clean up the trailing slash. This must be done in an operating
	// system specific manner.
	if dst[len(dst)-1] == os.PathSeparator {
		dst = filepath.Join(dst, filepath.Base(src))
	}
	// Create the holding directory if necessary
	if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
		return err
	}

	r, w := io.Pipe()
	errC := make(chan error, 1)

	go func() {
		defer close(errC)

		errC <- func() error {
			defer w.Close()

			srcF, err := os.Open(src)
			if err != nil {
				return err
			}
			defer srcF.Close()

			hdr, err := tar.FileInfoHeader(srcSt, "")
			if err != nil {
				return err
			}
			hdr.Format = tar.FormatPAX
			hdr.ModTime = hdr.ModTime.Truncate(time.Second)
			hdr.AccessTime = time.Time{}
			hdr.ChangeTime = time.Time{}
			hdr.Name = filepath.Base(dst)
			hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

			if err := remapIDs(archiver.IDMapping, hdr); err != nil {
				return err
			}

			tw := tar.NewWriter(w)
			defer tw.Close()
			if err := tw.WriteHeader(hdr); err != nil {
				return err
			}
			if _, err := io.Copy(tw, srcF); err != nil {
				return err
			}
			return nil
		}()
	}()
	defer func() {
		if er := <-errC; err == nil && er != nil {
			err = er
		}
	}()

	err = archiver.Untar(r, filepath.Dir(dst), nil)
	if err != nil {
		r.CloseWithError(err)
	}
	return err
}

// IdentityMapping returns the IdentityMapping of the archiver.
func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping {
	return archiver.IDMapping
}

func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error {
	ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid})
	hdr.Uid, hdr.Gid = ids.UID, ids.GID
	return err
}

// cmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
	cmd.Stdin = input
	pipeR, pipeW := io.Pipe()
	cmd.Stdout = pipeW
	var errBuf bytes.Buffer
	cmd.Stderr = &errBuf

	// Run the command and return the pipe
	if err := cmd.Start(); err != nil {
		return nil, err
	}

	// Ensure the command has exited before we clean anything up
	done := make(chan struct{})

	// Copy stdout to the returned pipe
	go func() {
		if err := cmd.Wait(); err != nil {
			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
		} else {
			pipeW.Close()
		}
		close(done)
	}()

	return ioutils.NewReadCloserWrapper(pipeR, func() error {
		// Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as
		// cmd.Wait waits for any non-file stdout/stderr/stdin to close.
		err := pipeR.Close()
		<-done
		return err
	}), nil
}

// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
	f, err := ioutil.TempFile(dir, "")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(f, src); err != nil {
		return nil, err
	}
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	st, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := st.Size()
	return &TempArchive{File: f, Size: size}, nil
}

// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
type TempArchive struct {
	*os.File
	Size   int64 // Pre-computed from Stat().Size() as a convenience
	read   int64
	closed bool
}

// Close closes the underlying file if it's still open, or does a no-op
// to allow callers to try to close the TempArchive multiple times safely.
func (archive *TempArchive) Close() error {
	if archive.closed {
		return nil
	}

	archive.closed = true

	return archive.File.Close()
}

func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	archive.read += int64(n)
	if err != nil || archive.read == archive.Size {
		archive.Close()
		os.Remove(archive.File.Name())
	}
	return n, err
}
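
// exampleSpoolToTempFile is an illustrative sketch, not part of the upstream
// file: it spools an incoming archive stream to a temporary file in dir via
// NewTempArchive. The returned reader can be consumed exactly once; after it
// has been read to the end, the backing file deletes itself.
func exampleSpoolToTempFile(src io.Reader, dir string) (io.Reader, int64, error) {
	ta, err := NewTempArchive(src, dir)
	if err != nil {
		return nil, 0, err
	}
	return ta, ta.Size, nil
}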