package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"bufio"
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/system"
	"github.com/sirupsen/logrus"
	exec "golang.org/x/sys/execabs"
)

type (
	// Compression is the state representing whether an archive is compressed or not.
	Compression int
	// WhiteoutFormat is the format of whiteouts unpacked
	WhiteoutFormat int

	// TarOptions wraps the tar options.
	TarOptions struct {
		IncludeFiles     []string
		ExcludePatterns  []string
		Compression      Compression
		NoLchown         bool
		UIDMaps          []idtools.IDMap
		GIDMaps          []idtools.IDMap
		ChownOpts        *idtools.Identity
		IncludeSourceDir bool
		// WhiteoutFormat is the expected on disk format for whiteout files.
		// This format will be converted to the standard format on pack
		// and from the standard format on unpack.
		WhiteoutFormat WhiteoutFormat
		// When unpacking, specifies whether overwriting a directory with a
		// non-directory is allowed and vice versa.
		NoOverwriteDirNonDir bool
		// For each include when creating an archive, the included name will be
		// replaced with the matching name from this map.
		RebaseNames map[string]string
		InUserNS    bool
	}
)

// Archiver implements the Archiver interface and allows the reuse of most utility functions of
// this package with a pluggable Untar function. Also, to facilitate the passing of specific id
// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
type Archiver struct {
	Untar     func(io.Reader, string, *TarOptions) error
	IDMapping *idtools.IdentityMapping
}

// NewDefaultArchiver returns a new Archiver without any IdentityMapping
func NewDefaultArchiver() *Archiver {
	return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}}
}

// breakoutError is used to differentiate errors related to breaking out.
// When testing archive breakout in the unit tests, this error is expected
// in order for the test to pass.
type breakoutError error

const (
	// Uncompressed represents an uncompressed stream.
	Uncompressed Compression = iota
	// Bzip2 is the bzip2 compression algorithm.
	Bzip2
	// Gzip is the gzip compression algorithm.
	Gzip
	// Xz is the xz compression algorithm.
	Xz
)

const (
	// AUFSWhiteoutFormat is the default format for whiteouts
	AUFSWhiteoutFormat WhiteoutFormat = iota
	// OverlayWhiteoutFormat formats whiteout according to the overlay
	// standard.
	OverlayWhiteoutFormat
)

const (
	modeISDIR  = 040000  // Directory
	modeISFIFO = 010000  // FIFO
	modeISREG  = 0100000 // Regular file
	modeISLNK  = 0120000 // Symbolic link
	modeISBLK  = 060000  // Block special file
	modeISCHR  = 020000  // Character special file
	modeISSOCK = 0140000 // Socket
)

// IsArchivePath checks if the (possibly compressed) file at the given path
// starts with a tar file header.
func IsArchivePath(path string) bool {
	file, err := os.Open(path)
	if err != nil {
		return false
	}
	defer file.Close()
	rdr, err := DecompressStream(file)
	if err != nil {
		return false
	}
	defer rdr.Close()
	r := tar.NewReader(rdr)
	_, err = r.Next()
	return err == nil
}

// DetectCompression detects the compression algorithm of the source.
func DetectCompression(source []byte) Compression {
	for compression, m := range map[Compression][]byte{
		Bzip2: {0x42, 0x5A, 0x68},
		Gzip:  {0x1F, 0x8B, 0x08},
		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
	} {
		if len(source) < len(m) {
			logrus.Debug("Len too short")
			continue
		}
		if bytes.Equal(m, source[:len(m)]) {
			return compression
		}
	}
	return Uncompressed
}
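
// detectFileCompression is an illustrative sketch, not part of the original
// file, showing how DetectCompression can be applied to the leading bytes of
// a file on disk; the function name is hypothetical.
func detectFileCompression(path string) (Compression, error) {
	f, err := os.Open(path)
	if err != nil {
		return Uncompressed, err
	}
	defer f.Close()

	// Ten bytes cover the longest magic number checked by DetectCompression
	// (the six-byte xz header), matching the Peek(10) used by DecompressStream.
	header := make([]byte, 10)
	n, err := f.Read(header)
	if err != nil && err != io.EOF {
		return Uncompressed, err
	}
	return DetectCompression(header[:n]), nil
}
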
func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) {
	args := []string{"xz", "-d", "-c", "-q"}

	return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive)
}

func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
	noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ")
	var noPigz bool

	if noPigzEnv != "" {
		var err error
		noPigz, err = strconv.ParseBool(noPigzEnv)
		if err != nil {
			logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var")
		}
	}

	if noPigz {
		logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv)
		return gzip.NewReader(buf)
	}

	unpigzPath, err := exec.LookPath("unpigz")
	if err != nil {
		logrus.Debugf("unpigz binary not found, falling back to go gzip library")
		return gzip.NewReader(buf)
	}

	logrus.Debugf("Using %s to decompress", unpigzPath)

	return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
}

func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser {
	return ioutils.NewReadCloserWrapper(readBuf, func() error {
		cancel()
		return readBuf.Close()
	})
}

// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	p := pools.BufioReader32KPool
	buf := p.Get(archive)
	bs, err := buf.Peek(10)
	if err != nil && err != io.EOF {
		// Note: we'll ignore any io.EOF error because there are some odd
		// cases where the layer.tar file will be empty (zero bytes) and
		// that results in an io.EOF from the Peek() call. So, in those
		// cases we'll just treat it as a non-compressed stream and
		// that means just create an empty layer.
		// See Issue 18170
		return nil, err
	}

	compression := DetectCompression(bs)
	switch compression {
	case Uncompressed:
		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
		return readBufWrapper, nil
	case Gzip:
		ctx, cancel := context.WithCancel(context.Background())

		gzReader, err := gzDecompress(ctx, buf)
		if err != nil {
			cancel()
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
		return wrapReadCloser(readBufWrapper, cancel), nil
	case Bzip2:
		bz2Reader := bzip2.NewReader(buf)
		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
		return readBufWrapper, nil
	case Xz:
		ctx, cancel := context.WithCancel(context.Background())

		xzReader, err := xzDecompress(ctx, buf)
		if err != nil {
			cancel()
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
		return wrapReadCloser(readBufWrapper, cancel), nil
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}

// CompressStream compresses the dest with the specified compression algorithm.
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
	p := pools.BufioWriter32KPool
	buf := p.Get(dest)
	switch compression {
	case Uncompressed:
		writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
		return writeBufWrapper, nil
	case Gzip:
		gzWriter := gzip.NewWriter(dest)
		writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
		return writeBufWrapper, nil
	case Bzip2, Xz:
		// archive/bzip2 does not support writing, and there is no xz support at all
		// However, this is not a problem as docker only currently generates gzipped tars
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}
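
// roundTripGzip is an illustrative sketch, not part of the original file,
// showing how CompressStream and DecompressStream pair up: bytes written
// through the gzip WriteCloser can be read back verbatim via DecompressStream.
// The function name is hypothetical.
func roundTripGzip(payload []byte) ([]byte, error) {
	var compressed bytes.Buffer

	w, err := CompressStream(&compressed, Gzip)
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(payload); err != nil {
		w.Close()
		return nil, err
	}
	// Close flushes the gzip footer; the compressed bytes are now complete.
	if err := w.Close(); err != nil {
		return nil, err
	}

	r, err := DecompressStream(&compressed)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}
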
// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
// modify the contents or header of an entry in the archive. If the file already
// exists in the archive the TarModifierFunc will be called with the Header and
// a reader which will return the file's content. If the file does not exist both
// header and content will be nil.
type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)

// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
// tar stream are modified if they match any of the keys in mods.
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
	pipeReader, pipeWriter := io.Pipe()

	go func() {
		tarReader := tar.NewReader(inputTarStream)
		tarWriter := tar.NewWriter(pipeWriter)
		defer inputTarStream.Close()
		defer tarWriter.Close()

		modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
			header, data, err := modifier(name, original, tarReader)
			switch {
			case err != nil:
				return err
			case header == nil:
				return nil
			}

			header.Name = name
			header.Size = int64(len(data))
			if err := tarWriter.WriteHeader(header); err != nil {
				return err
			}
			if len(data) != 0 {
				if _, err := tarWriter.Write(data); err != nil {
					return err
				}
			}
			return nil
		}

		var err error
		var originalHeader *tar.Header
		for {
			originalHeader, err = tarReader.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				pipeWriter.CloseWithError(err)
				return
			}

			modifier, ok := mods[originalHeader.Name]
			if !ok {
				// No modifiers for this file, copy the header and data
				if err := tarWriter.WriteHeader(originalHeader); err != nil {
					pipeWriter.CloseWithError(err)
					return
				}
				if _, err := pools.Copy(tarWriter, tarReader); err != nil {
					pipeWriter.CloseWithError(err)
					return
				}
				continue
			}
			delete(mods, originalHeader.Name)

			if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
		}

		// Apply the modifiers that haven't matched any files in the archive
		for name, modifier := range mods {
			if err := modify(name, nil, modifier, nil); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
		}

		pipeWriter.Close()
	}()
	return pipeReader
}
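
// appendLineModifier is an illustrative sketch, not part of the original file,
// of a TarModifierFunc for use with ReplaceFileTarWrapper: it appends one line
// to the matched entry, synthesizing the entry if it is absent from the
// archive. The function name is hypothetical.
func appendLineModifier(line string) TarModifierFunc {
	return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
		var data []byte
		if content != nil {
			var err error
			if data, err = ioutil.ReadAll(content); err != nil {
				return nil, nil, err
			}
		}
		if header == nil {
			// The file did not exist in the archive; synthesize a regular-file header.
			header = &tar.Header{Typeflag: tar.TypeReg, Mode: 0644, ModTime: time.Now()}
		}
		// ReplaceFileTarWrapper resets header.Name and header.Size itself.
		return header, append(data, []byte(line+"\n")...), nil
	}
}
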
// Extension returns the extension of a file that uses the specified compression algorithm.
func (compression *Compression) Extension() string {
	switch *compression {
	case Uncompressed:
		return "tar"
	case Bzip2:
		return "tar.bz2"
	case Gzip:
		return "tar.gz"
	case Xz:
		return "tar.xz"
	}
	return ""
}

// FileInfoHeader creates a populated Header from fi.
// Compared to the archive/tar package, this function fills in more information.
// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
// which have been removed since Go 1.9 archive/tar.
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return nil, err
	}
	hdr.Format = tar.FormatPAX
	hdr.ModTime = hdr.ModTime.Truncate(time.Second)
	hdr.AccessTime = time.Time{}
	hdr.ChangeTime = time.Time{}
	hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
	hdr.Name = canonicalTarName(name, fi.IsDir())
	if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
		return nil, err
	}
	return hdr, nil
}

// fillGo18FileTypeBits fills type bits which have been removed since Go 1.9 archive/tar
// https://github.com/golang/go/commit/66b5a2f
func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
	fm := fi.Mode()
	switch {
	case fm.IsRegular():
		mode |= modeISREG
	case fi.IsDir():
		mode |= modeISDIR
	case fm&os.ModeSymlink != 0:
		mode |= modeISLNK
	case fm&os.ModeDevice != 0:
		if fm&os.ModeCharDevice != 0 {
			mode |= modeISCHR
		} else {
			mode |= modeISBLK
		}
	case fm&os.ModeNamedPipe != 0:
		mode |= modeISFIFO
	case fm&os.ModeSocket != 0:
		mode |= modeISSOCK
	}
	return mode
}

// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
// to a tar header
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability)
	}
	return nil
}
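
// headerForPath is an illustrative sketch, not part of the original file,
// showing FileInfoHeader and ReadSecurityXattrToTarHeader being combined to
// build a PAX header for a path on disk, much as addTarFile does below.
// The function name is hypothetical.
func headerForPath(path, name string) (*tar.Header, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return nil, err
	}
	var link string
	if fi.Mode()&os.ModeSymlink != 0 {
		if link, err = os.Readlink(path); err != nil {
			return nil, err
		}
	}
	hdr, err := FileInfoHeader(name, fi, link)
	if err != nil {
		return nil, err
	}
	// Carry over the security.capability xattr, if any.
	if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
		return nil, err
	}
	return hdr, nil
}
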
type tarWhiteoutConverter interface {
	ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
	ConvertRead(*tar.Header, string) (bool, error)
}

type tarAppender struct {
	TarWriter *tar.Writer
	Buffer    *bufio.Writer

	// for hardlink mapping
	SeenFiles       map[uint64]string
	IdentityMapping *idtools.IdentityMapping
	ChownOpts       *idtools.Identity

	// For packing and unpacking whiteout files in the
	// non standard format. The whiteout files defined
	// by the AUFS standard are used as the tar whiteout
	// standard.
	WhiteoutConverter tarWhiteoutConverter
}

func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender {
	return &tarAppender{
		SeenFiles:       make(map[uint64]string),
		TarWriter:       tar.NewWriter(writer),
		Buffer:          pools.BufioWriter32KPool.Get(nil),
		IdentityMapping: idMapping,
		ChownOpts:       chownOpts,
	}
}

// canonicalTarName provides a platform-independent and consistent POSIX-style
// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) string {
	name = CanonicalTarNameForPath(name)

	// suffix with '/' for directories
	if isDir && !strings.HasSuffix(name, "/") {
		name += "/"
	}
	return name
}

// addTarFile adds to the tar archive a file from `path` as `name`
func (ta *tarAppender) addTarFile(path, name string) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	var link string
	if fi.Mode()&os.ModeSymlink != 0 {
		var err error
		link, err = os.Readlink(path)
		if err != nil {
			return err
		}
	}

	hdr, err := FileInfoHeader(name, fi, link)
	if err != nil {
		return err
	}
	if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
		return err
	}

	// if it's not a directory and has more than 1 link,
	// it's hard linked, so set the type flag accordingly
	if !fi.IsDir() && hasHardlinks(fi) {
		inode, err := getInodeFromStat(fi.Sys())
		if err != nil {
			return err
		}
		// a link should have a name that it links to
		// and that linked name should be first in the tar archive
		if oldpath, ok := ta.SeenFiles[inode]; ok {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = oldpath
			hdr.Size = 0 // This must be here for the writer math to add up!
		} else {
			ta.SeenFiles[inode] = name
		}
	}

	// check whether the file is an overlayfs whiteout
	// if yes, skip re-mapping container ID mappings.
	isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0

	// handle re-mapping container ID mappings back to host ID mappings before
	// writing tar headers/files. We skip whiteout files because they were written
	// by the kernel and already have proper ownership relative to the host
	if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
		fileIDPair, err := getFileUIDGID(fi.Sys())
		if err != nil {
			return err
		}
		hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair)
		if err != nil {
			return err
		}
	}

	// explicitly override with ChownOpts
	if ta.ChownOpts != nil {
		hdr.Uid = ta.ChownOpts.UID
		hdr.Gid = ta.ChownOpts.GID
	}

	if ta.WhiteoutConverter != nil {
		wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
		if err != nil {
			return err
		}

		// If a new whiteout file exists, write the original hdr, then
		// replace hdr with wo to be written after. Whiteouts should
		// always be written after the original. Note that the original
		// hdr may have been updated to be a whiteout by returning
		// a whiteout header
		if wo != nil {
			if err := ta.TarWriter.WriteHeader(hdr); err != nil {
				return err
			}
			if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
				return fmt.Errorf("tar: cannot use whiteout for non-empty file")
			}
			hdr = wo
		}
	}

	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
		return err
	}

	if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
		// We use system.OpenSequential to ensure we use sequential file
		// access on Windows to avoid depleting the standby list.
		// On Linux, this equates to a regular os.Open.
		file, err := system.OpenSequential(path)
		if err != nil {
			return err
		}

		ta.Buffer.Reset(ta.TarWriter)
		defer ta.Buffer.Reset(nil)
		_, err = io.Copy(ta.Buffer, file)
		file.Close()
		if err != nil {
			return err
		}
		err = ta.Buffer.Flush()
		if err != nil {
			return err
		}
	}

	return nil
}

func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error {
	// hdr.Mode is in linux format, which we can use for syscalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file. We use system.OpenFileSequential to use sequential
		// file access to avoid depleting the standby list on Windows.
		// On Linux, this equates to a regular os.OpenFile
		file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar:
		if inUserns { // cannot create devices in a userns
			return nil
		}
		// Handle this in an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeFifo:
		// Handle this in an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeLink:
		targetPath := filepath.Join(extractDir, hdr.Linkname)
		// check for hardlink breakout
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
		}
		if err := os.Link(targetPath, path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		// path -> hdr.Linkname = targetPath
		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)

		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
		// that symlink would first have to be created, which would be caught earlier, at this very check:
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
		}
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		logrus.Debug("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
	}

	// Lchown is not supported on Windows.
	if Lchown && runtime.GOOS != "windows" {
		if chownOpts == nil {
			chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}
		}
		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
			return err
		}
	}

	var errors []string
	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
			if err == syscall.ENOTSUP || err == syscall.EPERM {
				// We ignore errors here because not all graphdrivers support
				// xattrs *cough* old versions of AUFS *cough*. However only
				// ENOTSUP should be emitted in that case, otherwise we still
				// bail.
				// EPERM occurs if modifying xattrs is not allowed. This can
				// happen when running in userns with restrictions (ChromeOS).
				errors = append(errors, err.Error())
				continue
			}
			return err
		}
	}

	if len(errors) > 0 {
		logrus.WithFields(logrus.Fields{
			"errors": errors,
		}).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
		return err
	}

	aTime := hdr.AccessTime
	if aTime.Before(hdr.ModTime) {
		// Last access time should never be before last modified time.
		aTime = hdr.ModTime
	}

	// system.Chtimes doesn't support a NOFOLLOW flag atm
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
			return err
		}
	} else {
		ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}

// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
func Tar(path string, compression Compression) (io.ReadCloser, error) {
	return TarWithOptions(path, &TarOptions{Compression: compression})
}

// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
	// Fix the source path to work with long path names. This is a no-op
	// on platforms other than Windows.
	srcPath = fixVolumePathPrefix(srcPath)

	pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
	if err != nil {
		return nil, err
	}

	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
	if err != nil {
		return nil, err
	}

	go func() {
		ta := newTarAppender(
			idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
			compressWriter,
			options.ChownOpts,
		)
		ta.WhiteoutConverter = whiteoutConverter

		defer func() {
			// Make sure to check the error on Close.
			if err := ta.TarWriter.Close(); err != nil {
				logrus.Errorf("Can't close tar writer: %s", err)
			}
			if err := compressWriter.Close(); err != nil {
				logrus.Errorf("Can't close compress writer: %s", err)
			}
			if err := pipeWriter.Close(); err != nil {
				logrus.Errorf("Can't close pipe writer: %s", err)
			}
		}()

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		stat, err := os.Lstat(srcPath)
		if err != nil {
			return
		}

		if !stat.IsDir() {
			// We can't later join a non-dir with any includes because the
			// 'walk' will error if "file/." is stat-ed and "file" is not a
			// directory. So, we must split the source path and use the
			// basename as the include.
			if len(options.IncludeFiles) > 0 {
				logrus.Warn("Tar: Can't archive a file with includes")
			}

			dir, base := SplitPathDirEntry(srcPath)
			srcPath = dir
			options.IncludeFiles = []string{base}
		}

		if len(options.IncludeFiles) == 0 {
			options.IncludeFiles = []string{"."}
		}

		seen := make(map[string]bool)

		for _, include := range options.IncludeFiles {
			rebaseName := options.RebaseNames[include]

			walkRoot := getWalkRoot(srcPath, include)
			filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the source directory path. Skip in both situations.
					return nil
				}

				if options.IncludeSourceDir && include == "." && relFilePath != "." {
					relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
				}

				skip := false

				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = pm.Matches(relFilePath)
					if err != nil {
						logrus.Errorf("Error matching %s: %v", relFilePath, err)
						return err
					}
				}

				if skip {
					// If we want to skip this file and it's a directory
					// then we should first check to see if there's an
					// excludes pattern (e.g. !dir/file) that starts with this
					// dir. If so then we can't skip this dir.
					// If it's not a dir then we can just return/skip it.
					if !f.IsDir() {
						return nil
					}

					// No exceptions (!...) in patterns so just skip dir
					if !pm.Exclusions() {
						return filepath.SkipDir
					}

					dirSlash := relFilePath + string(filepath.Separator)

					for _, pat := range pm.Patterns() {
						if !pat.Exclusion() {
							continue
						}
						if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
							// found a match - so can't skip this dir
							return nil
						}
					}

					// No matching exclusion dir so just skip dir
					return filepath.SkipDir
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource.
				if rebaseName != "" {
					var replacement string
					if rebaseName != string(filepath.Separator) {
						// Special case the root directory to replace with an
						// empty string instead so that we don't end up with
						// double slashes in the paths.
						replacement = rebaseName
					}

					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
					// if pipe is broken, stop writing tar stream to it
					if err == io.ErrClosedPipe {
						return err
					}
				}
				return nil
			})
		}
	}()

	return pipeReader, nil
}
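
// tarDirectoryToFile is an illustrative sketch, not part of the original file,
// showing a typical TarWithOptions call: gzip compression plus an exclude
// pattern, streamed into a file on disk. The function name and the "*.tmp"
// pattern are illustrative.
func tarDirectoryToFile(srcDir, outPath string) error {
	rc, err := TarWithOptions(srcDir, &TarOptions{
		Compression:     Gzip,
		ExcludePatterns: []string{"*.tmp"},
	})
	if err != nil {
		return err
	}
	defer rc.Close()

	out, err := os.Create(outPath)
	if err != nil {
		return err
	}
	defer out.Close()

	// The archive is produced by a goroutine inside TarWithOptions and
	// streamed through a pipe, so copying here drives the whole operation.
	_, err = io.Copy(out, rc)
	return err
}
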
// Unpack unpacks the decompressedArchive to dest with options.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
	tr := tar.NewReader(decompressedArchive)
	trBuf := pools.BufioReader32KPool.Get(nil)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
	rootIDs := idMapping.RootPair()
	whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
	if err != nil {
		return err
	}

	// Iterate through the files in the archive.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// ignore XGlobalHeader early to avoid creating parent directories for them
		if hdr.Typeflag == tar.TypeXGlobalHeader {
			logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name)
			continue
		}

		// Normalize name, for safety and for a simple is-root check
		// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
		// This keeps "..\" as-is, but normalizes "\..\" to "\".
		hdr.Name = filepath.Clean(hdr.Name)

		for _, exclude := range options.ExcludePatterns {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
		// the filepath format for the OS on which the daemon is running. Hence
		// the check for a slash-suffix MUST be done in an OS-agnostic way.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return err
		}
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}

		// If path exists we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing directory with a non-directory from the archive.
				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
			}

			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing non-directory with a directory from the archive.
				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
			}

			if fi.IsDir() && hdr.Name == "." {
				continue
			}

			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		trBuf.Reset(tr)

		if err := remapIDs(idMapping, hdr); err != nil {
			return err
		}

		if whiteoutConverter != nil {
			writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
			if err != nil {
				return err
			}
			if !writeFile {
				continue
			}
		}

		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)

		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return err
		}
	}
	return nil
}

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
	return untarHandler(tarArchive, dest, options, true)
}

// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
	return untarHandler(tarArchive, dest, options, false)
}

// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
	if tarArchive == nil {
		return fmt.Errorf("Empty archive")
	}
	dest = filepath.Clean(dest)
	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}

	r := tarArchive
	if decompress {
		decompressedArchive, err := DecompressStream(tarArchive)
		if err != nil {
			return err
		}
		defer decompressedArchive.Close()
		r = decompressedArchive
	}

	return Unpack(r, dest, options)
}
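
// untarFileExample is an illustrative sketch, not part of the original file,
// showing how a (possibly compressed) tar file on disk can be unpacked with
// Untar, which detects gzip, bzip2, and xz automatically. The function name
// is hypothetical.
func untarFileExample(tarPath, dest string) error {
	f, err := os.Open(tarPath)
	if err != nil {
		return err
	}
	defer f.Close()

	// NoLchown skips chown calls, which is convenient when extracting as an
	// unprivileged user.
	return Untar(f, dest, &TarOptions{NoLchown: true})
}
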
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func (archiver *Archiver) TarUntar(src, dst string) error {
	logrus.Debugf("TarUntar(%s %s)", src, dst)
	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
	if err != nil {
		return err
	}
	defer archive.Close()
	options := &TarOptions{
		UIDMaps: archiver.IDMapping.UIDs(),
		GIDMaps: archiver.IDMapping.GIDs(),
	}
	return archiver.Untar(archive, dst, options)
}

// UntarPath untars a file from path to a destination; src is the source tar file path.
func (archiver *Archiver) UntarPath(src, dst string) error {
	archive, err := os.Open(src)
	if err != nil {
		return err
	}
	defer archive.Close()
	options := &TarOptions{
		UIDMaps: archiver.IDMapping.UIDs(),
		GIDMaps: archiver.IDMapping.GIDs(),
	}
	return archiver.Untar(archive, dst, options)
}

// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func (archiver *Archiver) CopyWithTar(src, dst string) error {
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}
	if !srcSt.IsDir() {
		return archiver.CopyFileWithTar(src, dst)
	}

	// if this Archiver is set up with ID mapping we need to create
	// the new destination directory with the remapped root UID/GID pair
	// as owner
	rootIDs := archiver.IDMapping.RootPair()
	// Create dst, copy src's content into it
	logrus.Debugf("Creating dest directory: %s", dst)
	if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
		return err
	}
	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
	return archiver.TarUntar(src, dst)
}

// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}

	if srcSt.IsDir() {
		return fmt.Errorf("Can't copy a directory")
	}

	// Clean up the trailing slash. This must be done in an operating
	// system specific manner.
	if dst[len(dst)-1] == os.PathSeparator {
		dst = filepath.Join(dst, filepath.Base(src))
	}
	// Create the holding directory if necessary
	if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
		return err
	}

	r, w := io.Pipe()
	errC := make(chan error, 1)

	go func() {
		defer close(errC)

		errC <- func() error {
			defer w.Close()

			srcF, err := os.Open(src)
			if err != nil {
				return err
			}
			defer srcF.Close()

			hdr, err := tar.FileInfoHeader(srcSt, "")
			if err != nil {
				return err
			}
			hdr.Format = tar.FormatPAX
			hdr.ModTime = hdr.ModTime.Truncate(time.Second)
			hdr.AccessTime = time.Time{}
			hdr.ChangeTime = time.Time{}
			hdr.Name = filepath.Base(dst)
			hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

			if err := remapIDs(archiver.IDMapping, hdr); err != nil {
				return err
			}

			tw := tar.NewWriter(w)
			defer tw.Close()
			if err := tw.WriteHeader(hdr); err != nil {
				return err
			}
			if _, err := io.Copy(tw, srcF); err != nil {
				return err
			}
			return nil
		}()
	}()
	defer func() {
		if er := <-errC; err == nil && er != nil {
			err = er
		}
	}()

	err = archiver.Untar(r, filepath.Dir(dst), nil)
	if err != nil {
		r.CloseWithError(err)
	}
	return err
}

// IdentityMapping returns the IdentityMapping of the archiver.
func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping {
	return archiver.IDMapping
}
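
// copyTreeExample is an illustrative sketch, not part of the original file,
// showing the default Archiver copying a path while preserving metadata.
// The function name is hypothetical.
func copyTreeExample(src, dst string) error {
	archiver := NewDefaultArchiver()
	// CopyWithTar handles directories via TarUntar and falls back to
	// CopyFileWithTar when src is a regular file.
	return archiver.CopyWithTar(src, dst)
}
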
func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error {
	ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid})
	hdr.Uid, hdr.Gid = ids.UID, ids.GID
	return err
}

// cmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
	cmd.Stdin = input
	pipeR, pipeW := io.Pipe()
	cmd.Stdout = pipeW
	var errBuf bytes.Buffer
	cmd.Stderr = &errBuf

	// Run the command and return the pipe
	if err := cmd.Start(); err != nil {
		return nil, err
	}

	// Ensure the command has exited before we clean anything up
	done := make(chan struct{})

	// Copy stdout to the returned pipe
	go func() {
		if err := cmd.Wait(); err != nil {
			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
		} else {
			pipeW.Close()
		}
		close(done)
	}()

	return ioutils.NewReadCloserWrapper(pipeR, func() error {
		// Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as
		// cmd.Wait waits for any non-file stdout/stderr/stdin to close.
		err := pipeR.Close()
		<-done
		return err
	}), nil
}

// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
	f, err := ioutil.TempFile(dir, "")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(f, src); err != nil {
		return nil, err
	}
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	st, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := st.Size()
	return &TempArchive{File: f, Size: size}, nil
}

// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
type TempArchive struct {
	*os.File
	Size   int64 // Pre-computed from Stat().Size() as a convenience
	read   int64
	closed bool
}

// Close closes the underlying file if it's still open, or does a no-op
// to allow callers to try to close the TempArchive multiple times safely.
func (archive *TempArchive) Close() error {
	if archive.closed {
		return nil
	}

	archive.closed = true

	return archive.File.Close()
}

func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	archive.read += int64(n)
	if err != nil || archive.read == archive.Size {
		archive.Close()
		os.Remove(archive.File.Name())
	}
	return n, err
}
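
// drainTempArchive is an illustrative sketch, not part of the original file,
// showing NewTempArchive buffering a stream to a temporary file that removes
// itself once it has been read to completion. The function name is hypothetical.
func drainTempArchive(src io.Reader) error {
	tmp, err := NewTempArchive(src, "")
	if err != nil {
		return err
	}
	// Reading to the end triggers the self-cleanup in TempArchive.Read.
	_, err = io.Copy(ioutil.Discard, tmp)
	return err
}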