archive.go 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284
  1. package archive // import "github.com/docker/docker/pkg/archive"
  2. import (
  3. "archive/tar"
  4. "bufio"
  5. "bytes"
  6. "compress/bzip2"
  7. "compress/gzip"
  8. "context"
  9. "fmt"
  10. "io"
  11. "io/ioutil"
  12. "os"
  13. "os/exec"
  14. "path/filepath"
  15. "runtime"
  16. "strconv"
  17. "strings"
  18. "syscall"
  19. "time"
  20. "github.com/docker/docker/pkg/fileutils"
  21. "github.com/docker/docker/pkg/idtools"
  22. "github.com/docker/docker/pkg/ioutils"
  23. "github.com/docker/docker/pkg/pools"
  24. "github.com/docker/docker/pkg/system"
  25. "github.com/sirupsen/logrus"
  26. )
  27. var unpigzPath string
  28. func init() {
  29. if path, err := exec.LookPath("unpigz"); err != nil {
  30. logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library")
  31. } else {
  32. logrus.Debugf("Using unpigz binary found at path %s", path)
  33. unpigzPath = path
  34. }
  35. }
type (
	// Compression is the state represents if compressed or not.
	Compression int
	// WhiteoutFormat is the format of whiteouts unpacked
	WhiteoutFormat int
	// TarOptions wraps the tar options.
	TarOptions struct {
		// IncludeFiles limits packing to these paths, relative to the
		// source directory; when empty, everything ('.') is included.
		IncludeFiles []string
		// ExcludePatterns are fileutils-style patterns; matching paths
		// are skipped unless they are an exact IncludeFiles match.
		ExcludePatterns []string
		// Compression selects the stream compression (see CompressStream).
		Compression Compression
		// NoLchown, when set, presumably skips ownership changes on
		// unpack — its use is outside this file; verify against Untar.
		NoLchown bool
		// UIDMaps/GIDMaps build the IdentityMapping used to translate
		// file ownership between host and container ID ranges.
		UIDMaps []idtools.IDMap
		GIDMaps []idtools.IDMap
		// ChownOpts, when non-nil, forces this identity onto every entry.
		ChownOpts *idtools.Identity
		// IncludeSourceDir includes the source directory itself (as "./")
		// rather than only its contents.
		IncludeSourceDir bool
		// WhiteoutFormat is the expected on disk format for whiteout files.
		// This format will be converted to the standard format on pack
		// and from the standard format on unpack.
		WhiteoutFormat WhiteoutFormat
		// When unpacking, specifies whether overwriting a directory with a
		// non-directory is allowed and vice versa.
		NoOverwriteDirNonDir bool
		// For each include when creating an archive, the included name will be
		// replaced with the matching name from this map.
		RebaseNames map[string]string
		// InUserNS indicates the process runs in a user namespace, where
		// device nodes cannot be created on unpack.
		InUserNS bool
	}
)
// Archiver implements the Archiver interface and allows the reuse of most utility functions of
// this package with a pluggable Untar function. Also, to facilitate the passing of specific id
// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
type Archiver struct {
	// Untar extracts the given tar stream into the destination directory.
	Untar func(io.Reader, string, *TarOptions) error
	// IDMapping translates file ownership between host and container IDs.
	IDMapping *idtools.IdentityMapping
}
  71. // NewDefaultArchiver returns a new Archiver without any IdentityMapping
  72. func NewDefaultArchiver() *Archiver {
  73. return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}}
  74. }
// breakoutError is used to differentiate errors related to breaking out
// of the extraction root (hardlink/symlink targets escaping extractDir).
// When testing archive breakout in the unit tests, this error is expected
// in order for the test to pass.
type breakoutError error
// Supported values of Compression, matched against stream magic numbers
// by DetectCompression.
const (
	// Uncompressed represents the uncompressed.
	Uncompressed Compression = iota
	// Bzip2 is bzip2 compression algorithm.
	Bzip2
	// Gzip is gzip compression algorithm.
	Gzip
	// Xz is xz compression algorithm.
	Xz
)
// Supported values of WhiteoutFormat.
const (
	// AUFSWhiteoutFormat is the default format for whiteouts
	AUFSWhiteoutFormat WhiteoutFormat = iota
	// OverlayWhiteoutFormat formats whiteout according to the overlay
	// standard.
	OverlayWhiteoutFormat
)
// Unix file-type bits (octal), re-applied to tar header modes by
// fillGo18FileTypeBits since Go 1.9 archive/tar stopped setting them.
const (
	modeISDIR  = 040000  // Directory
	modeISFIFO = 010000  // FIFO
	modeISREG  = 0100000 // Regular file
	modeISLNK  = 0120000 // Symbolic link
	modeISBLK  = 060000  // Block special file
	modeISCHR  = 020000  // Character special file
	modeISSOCK = 0140000 // Socket
)
  105. // IsArchivePath checks if the (possibly compressed) file at the given path
  106. // starts with a tar file header.
  107. func IsArchivePath(path string) bool {
  108. file, err := os.Open(path)
  109. if err != nil {
  110. return false
  111. }
  112. defer file.Close()
  113. rdr, err := DecompressStream(file)
  114. if err != nil {
  115. return false
  116. }
  117. defer rdr.Close()
  118. r := tar.NewReader(rdr)
  119. _, err = r.Next()
  120. return err == nil
  121. }
  122. // DetectCompression detects the compression algorithm of the source.
  123. func DetectCompression(source []byte) Compression {
  124. for compression, m := range map[Compression][]byte{
  125. Bzip2: {0x42, 0x5A, 0x68},
  126. Gzip: {0x1F, 0x8B, 0x08},
  127. Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
  128. } {
  129. if len(source) < len(m) {
  130. logrus.Debug("Len too short")
  131. continue
  132. }
  133. if bytes.Equal(m, source[:len(m)]) {
  134. return compression
  135. }
  136. }
  137. return Uncompressed
  138. }
  139. func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) {
  140. args := []string{"xz", "-d", "-c", "-q"}
  141. return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive)
  142. }
  143. func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
  144. if unpigzPath == "" {
  145. return gzip.NewReader(buf)
  146. }
  147. disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ")
  148. if disablePigzEnv != "" {
  149. if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil {
  150. return nil, err
  151. } else if disablePigz {
  152. return gzip.NewReader(buf)
  153. }
  154. }
  155. return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
  156. }
  157. func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser {
  158. return ioutils.NewReadCloserWrapper(readBuf, func() error {
  159. cancel()
  160. return readBuf.Close()
  161. })
  162. }
// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
// The compression algorithm is sniffed from the stream's leading magic bytes.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	p := pools.BufioReader32KPool
	buf := p.Get(archive)
	// Peek (not Read) so the sniffed bytes stay in the stream for the
	// decompressor.
	bs, err := buf.Peek(10)
	if err != nil && err != io.EOF {
		// Note: we'll ignore any io.EOF error because there are some odd
		// cases where the layer.tar file will be empty (zero bytes) and
		// that results in an io.EOF from the Peek() call. So, in those
		// cases we'll just treat it as a non-compressed stream and
		// that means just create an empty layer.
		// See Issue 18170
		return nil, err
	}

	compression := DetectCompression(bs)
	switch compression {
	case Uncompressed:
		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
		return readBufWrapper, nil
	case Gzip:
		// gzDecompress may shell out to unpigz; the cancel func is bound
		// into the returned Closer so Close() reaps the child process.
		ctx, cancel := context.WithCancel(context.Background())

		gzReader, err := gzDecompress(ctx, buf)
		if err != nil {
			cancel()
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
		return wrapReadCloser(readBufWrapper, cancel), nil
	case Bzip2:
		// bzip2 decompression is in-process; no context needed.
		bz2Reader := bzip2.NewReader(buf)
		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
		return readBufWrapper, nil
	case Xz:
		// xz always runs as an external process (see xzDecompress), so the
		// same Close-cancels-process wiring as gzip applies.
		ctx, cancel := context.WithCancel(context.Background())

		xzReader, err := xzDecompress(ctx, buf)
		if err != nil {
			cancel()
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
		return wrapReadCloser(readBufWrapper, cancel), nil
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}
  208. // CompressStream compresses the dest with specified compression algorithm.
  209. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
  210. p := pools.BufioWriter32KPool
  211. buf := p.Get(dest)
  212. switch compression {
  213. case Uncompressed:
  214. writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
  215. return writeBufWrapper, nil
  216. case Gzip:
  217. gzWriter := gzip.NewWriter(dest)
  218. writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
  219. return writeBufWrapper, nil
  220. case Bzip2, Xz:
  221. // archive/bzip2 does not support writing, and there is no xz support at all
  222. // However, this is not a problem as docker only currently generates gzipped tars
  223. return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
  224. default:
  225. return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
  226. }
  227. }
// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to
// modify the contents or header of an entry in the archive. If the file already
// exists in the archive the TarModifierFunc will be called with the Header and
// a reader which will return the files content. If the file does not exist both
// header and content will be nil. Returning a nil header drops the entry from
// the output stream; otherwise the returned header and bytes replace it.
type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)
// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the
// tar stream are modified if they match any of the keys in mods.
//
// The rewrite happens in a background goroutine feeding an io.Pipe; errors are
// propagated to the returned reader via CloseWithError. Modifiers whose key
// never matched an entry are invoked at the end with a nil header/reader,
// letting them append brand-new entries.
func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser {
	pipeReader, pipeWriter := io.Pipe()

	go func() {
		tarReader := tar.NewReader(inputTarStream)
		tarWriter := tar.NewWriter(pipeWriter)
		defer inputTarStream.Close()
		defer tarWriter.Close()

		// modify runs a single modifier and writes its (possibly replaced)
		// header and data to the output. A nil returned header means "drop".
		modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error {
			header, data, err := modifier(name, original, tarReader)
			switch {
			case err != nil:
				return err
			case header == nil:
				return nil
			}

			// Size must match len(data) or the tar writer's accounting breaks.
			header.Name = name
			header.Size = int64(len(data))
			if err := tarWriter.WriteHeader(header); err != nil {
				return err
			}
			if len(data) != 0 {
				if _, err := tarWriter.Write(data); err != nil {
					return err
				}
			}
			return nil
		}

		var err error
		var originalHeader *tar.Header
		for {
			originalHeader, err = tarReader.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				pipeWriter.CloseWithError(err)
				return
			}

			modifier, ok := mods[originalHeader.Name]
			if !ok {
				// No modifiers for this file, copy the header and data
				if err := tarWriter.WriteHeader(originalHeader); err != nil {
					pipeWriter.CloseWithError(err)
					return
				}
				if _, err := pools.Copy(tarWriter, tarReader); err != nil {
					pipeWriter.CloseWithError(err)
					return
				}
				continue
			}

			// Each modifier fires at most once; remove it so the trailing
			// loop below only sees never-matched entries.
			delete(mods, originalHeader.Name)
			if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
		}

		// Apply the modifiers that haven't matched any files in the archive
		for name, modifier := range mods {
			if err := modify(name, nil, modifier, nil); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
		}

		pipeWriter.Close()
	}()
	return pipeReader
}
  304. // Extension returns the extension of a file that uses the specified compression algorithm.
  305. func (compression *Compression) Extension() string {
  306. switch *compression {
  307. case Uncompressed:
  308. return "tar"
  309. case Bzip2:
  310. return "tar.bz2"
  311. case Gzip:
  312. return "tar.gz"
  313. case Xz:
  314. return "tar.xz"
  315. }
  316. return ""
  317. }
// FileInfoHeader creates a populated Header from fi.
// Compared to archive pkg this function fills in more information.
// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
// which have been deleted since Go 1.9 archive/tar.
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return nil, err
	}
	// Force PAX so long names/large values are representable; drop
	// access/change times and truncate mtime for reproducible headers.
	hdr.Format = tar.FormatPAX
	hdr.ModTime = hdr.ModTime.Truncate(time.Second)
	hdr.AccessTime = time.Time{}
	hdr.ChangeTime = time.Time{}
	// chmodTarEntry is platform-specific; the type bits are re-added on top.
	hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
	hdr.Name = canonicalTarName(name, fi.IsDir())
	// Records major/minor numbers for device nodes (platform-specific).
	if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
		return nil, err
	}
	return hdr, nil
}
  338. // fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar
  339. // https://github.com/golang/go/commit/66b5a2f
  340. func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 {
  341. fm := fi.Mode()
  342. switch {
  343. case fm.IsRegular():
  344. mode |= modeISREG
  345. case fi.IsDir():
  346. mode |= modeISDIR
  347. case fm&os.ModeSymlink != 0:
  348. mode |= modeISLNK
  349. case fm&os.ModeDevice != 0:
  350. if fm&os.ModeCharDevice != 0 {
  351. mode |= modeISCHR
  352. } else {
  353. mode |= modeISBLK
  354. }
  355. case fm&os.ModeNamedPipe != 0:
  356. mode |= modeISFIFO
  357. case fm&os.ModeSocket != 0:
  358. mode |= modeISSOCK
  359. }
  360. return mode
  361. }
// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
// to a tar header
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
	// Best-effort: the Lgetxattr error is deliberately discarded, so a
	// missing attribute (or unsupported filesystem) leaves the header
	// untouched. NOTE(review): genuine lookup failures are silenced too —
	// confirm that is intended.
	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability)
	}
	// Always reports success; the error return exists for call-site symmetry.
	return nil
}
// tarWhiteoutConverter converts whiteout entries between the on-disk format
// (e.g. overlayfs character devices) and the AUFS-style tar representation.
type tarWhiteoutConverter interface {
	// ConvertWrite maps an on-disk file to tar form while packing; it may
	// return an extra whiteout header to emit after the original.
	ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
	// ConvertRead maps a tar entry back to on-disk form while unpacking;
	// the bool reports whether the entry should still be created.
	ConvertRead(*tar.Header, string) (bool, error)
}
// tarAppender bundles the state needed to add files to a tar stream:
// the writer itself, a reusable copy buffer, hardlink bookkeeping and
// ownership-mapping configuration.
type tarAppender struct {
	TarWriter *tar.Writer
	// Buffer is a pooled bufio.Writer reused for file-content copies.
	Buffer *bufio.Writer

	// for hardlink mapping
	SeenFiles       map[uint64]string
	IdentityMapping *idtools.IdentityMapping
	// ChownOpts, when non-nil, overrides the owner of every written header.
	ChownOpts *idtools.Identity

	// For packing and unpacking whiteout files in the
	// non standard format. The whiteout files defined
	// by the AUFS standard are used as the tar whiteout
	// standard.
	WhiteoutConverter tarWhiteoutConverter
}
  389. func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender {
  390. return &tarAppender{
  391. SeenFiles: make(map[uint64]string),
  392. TarWriter: tar.NewWriter(writer),
  393. Buffer: pools.BufioWriter32KPool.Get(nil),
  394. IdentityMapping: idMapping,
  395. ChownOpts: chownOpts,
  396. }
  397. }
  398. // canonicalTarName provides a platform-independent and consistent posix-style
  399. //path for files and directories to be archived regardless of the platform.
  400. func canonicalTarName(name string, isDir bool) string {
  401. name = CanonicalTarNameForPath(name)
  402. // suffix with '/' for directories
  403. if isDir && !strings.HasSuffix(name, "/") {
  404. name += "/"
  405. }
  406. return name
  407. }
// addTarFile adds to the tar archive a file from `path` as `name`.
// It handles symlink targets, hardlink deduplication, xattr capture,
// host-to-container ownership mapping and whiteout conversion, then
// writes the header and (for regular files) the content.
func (ta *tarAppender) addTarFile(path, name string) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	// For symlinks the header must record the link target.
	var link string
	if fi.Mode()&os.ModeSymlink != 0 {
		var err error
		link, err = os.Readlink(path)
		if err != nil {
			return err
		}
	}

	hdr, err := FileInfoHeader(name, fi, link)
	if err != nil {
		return err
	}
	if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
		return err
	}

	// if it's not a directory and has more than 1 link,
	// it's hard linked, so set the type flag accordingly
	if !fi.IsDir() && hasHardlinks(fi) {
		inode, err := getInodeFromStat(fi.Sys())
		if err != nil {
			return err
		}

		// a link should have a name that it links too
		// and that linked name should be first in the tar archive
		if oldpath, ok := ta.SeenFiles[inode]; ok {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = oldpath
			hdr.Size = 0 // This Must be here for the writer math to add up!
		} else {
			ta.SeenFiles[inode] = name
		}
	}

	//check whether the file is overlayfs whiteout
	//if yes, skip re-mapping container ID mappings.
	isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0

	//handle re-mapping container ID mappings back to host ID mappings before
	//writing tar headers/files. We skip whiteout files because they were written
	//by the kernel and already have proper ownership relative to the host
	if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
		fileIDPair, err := getFileUIDGID(fi.Sys())
		if err != nil {
			return err
		}
		hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair)
		if err != nil {
			return err
		}
	}

	// explicitly override with ChownOpts
	if ta.ChownOpts != nil {
		hdr.Uid = ta.ChownOpts.UID
		hdr.Gid = ta.ChownOpts.GID
	}

	if ta.WhiteoutConverter != nil {
		wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
		if err != nil {
			return err
		}

		// If a new whiteout file exists, write original hdr, then
		// replace hdr with wo to be written after. Whiteouts should
		// always be written after the original. Note the original
		// hdr may have been updated to be a whiteout with returning
		// a whiteout header
		if wo != nil {
			if err := ta.TarWriter.WriteHeader(hdr); err != nil {
				return err
			}
			// A whiteout can only stand in for an empty file; refuse to
			// drop real content on the floor.
			if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
				return fmt.Errorf("tar: cannot use whiteout for non-empty file")
			}
			hdr = wo
		}
	}

	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
		return err
	}

	// Only regular files with content carry a data section.
	if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
		// We use system.OpenSequential to ensure we use sequential file
		// access on Windows to avoid depleting the standby list.
		// On Linux, this equates to a regular os.Open.
		file, err := system.OpenSequential(path)
		if err != nil {
			return err
		}

		// Route the copy through the pooled buffer; detach it again on
		// return so the pool never holds a reference to this TarWriter.
		ta.Buffer.Reset(ta.TarWriter)
		defer ta.Buffer.Reset(nil)
		_, err = io.Copy(ta.Buffer, file)
		file.Close()
		if err != nil {
			return err
		}
		err = ta.Buffer.Flush()
		if err != nil {
			return err
		}
	}

	return nil
}
// createTarFile materializes a single tar entry at path (inside extractDir):
// it creates the filesystem object for hdr's type, then applies ownership,
// xattrs, mode and timestamps. reader supplies the content for regular files.
// Lchown toggles ownership changes; chownOpts overrides the header's owner;
// inUserns suppresses device-node creation.
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error {
	// hdr.Mode is in linux format, which we can use for sycalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file. We use system.OpenFileSequential to use sequential
		// file access to avoid depleting the standby list on Windows.
		// On Linux, this equates to a regular os.OpenFile
		file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar:
		if inUserns { // cannot create devices in a userns
			return nil
		}
		// Handle this is an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeFifo:
		// Handle this is an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeLink:
		targetPath := filepath.Join(extractDir, hdr.Linkname)
		// check for hardlink breakout
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
		}
		if err := os.Link(targetPath, path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		// path -> hdr.Linkname = targetPath
		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)

		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
		// that symlink would first have to be created, which would be caught earlier, at this very check:
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
		}
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		logrus.Debug("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
	}

	// Lchown is not supported on Windows.
	if Lchown && runtime.GOOS != "windows" {
		if chownOpts == nil {
			chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}
		}
		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
			return err
		}
	}

	var errors []string
	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
			if err == syscall.ENOTSUP || err == syscall.EPERM {
				// We ignore errors here because not all graphdrivers support
				// xattrs *cough* old versions of AUFS *cough*. However only
				// ENOTSUP should be emitted in that case, otherwise we still
				// bail.
				// EPERM occurs if modifying xattrs is not allowed. This can
				// happen when running in userns with restrictions (ChromeOS).
				errors = append(errors, err.Error())
				continue
			}
			return err
		}
	}

	if len(errors) > 0 {
		logrus.WithFields(logrus.Fields{
			"errors": errors,
		}).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
		return err
	}

	aTime := hdr.AccessTime
	if aTime.Before(hdr.ModTime) {
		// Last access time should never be before last modified time.
		aTime = hdr.ModTime
	}

	// system.Chtimes doesn't support a NOFOLLOW flag atm
	if hdr.Typeflag == tar.TypeLink {
		// Hardlink: only touch times when the link target is not a symlink.
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
			return err
		}
	} else {
		// Symlink: use LUtimesNano so the link itself (not its target) is
		// stamped; unsupported platforms are tolerated.
		ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}
  638. // Tar creates an archive from the directory at `path`, and returns it as a
  639. // stream of bytes.
  640. func Tar(path string, compression Compression) (io.ReadCloser, error) {
  641. return TarWithOptions(path, &TarOptions{Compression: compression})
  642. }
  643. // TarWithOptions creates an archive from the directory at `path`, only including files whose relative
  644. // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
  645. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
  646. // Fix the source path to work with long path names. This is a no-op
  647. // on platforms other than Windows.
  648. srcPath = fixVolumePathPrefix(srcPath)
  649. pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
  650. if err != nil {
  651. return nil, err
  652. }
  653. pipeReader, pipeWriter := io.Pipe()
  654. compressWriter, err := CompressStream(pipeWriter, options.Compression)
  655. if err != nil {
  656. return nil, err
  657. }
  658. go func() {
  659. ta := newTarAppender(
  660. idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
  661. compressWriter,
  662. options.ChownOpts,
  663. )
  664. ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
  665. defer func() {
  666. // Make sure to check the error on Close.
  667. if err := ta.TarWriter.Close(); err != nil {
  668. logrus.Errorf("Can't close tar writer: %s", err)
  669. }
  670. if err := compressWriter.Close(); err != nil {
  671. logrus.Errorf("Can't close compress writer: %s", err)
  672. }
  673. if err := pipeWriter.Close(); err != nil {
  674. logrus.Errorf("Can't close pipe writer: %s", err)
  675. }
  676. }()
  677. // this buffer is needed for the duration of this piped stream
  678. defer pools.BufioWriter32KPool.Put(ta.Buffer)
  679. // In general we log errors here but ignore them because
  680. // during e.g. a diff operation the container can continue
  681. // mutating the filesystem and we can see transient errors
  682. // from this
  683. stat, err := os.Lstat(srcPath)
  684. if err != nil {
  685. return
  686. }
  687. if !stat.IsDir() {
  688. // We can't later join a non-dir with any includes because the
  689. // 'walk' will error if "file/." is stat-ed and "file" is not a
  690. // directory. So, we must split the source path and use the
  691. // basename as the include.
  692. if len(options.IncludeFiles) > 0 {
  693. logrus.Warn("Tar: Can't archive a file with includes")
  694. }
  695. dir, base := SplitPathDirEntry(srcPath)
  696. srcPath = dir
  697. options.IncludeFiles = []string{base}
  698. }
  699. if len(options.IncludeFiles) == 0 {
  700. options.IncludeFiles = []string{"."}
  701. }
  702. seen := make(map[string]bool)
  703. for _, include := range options.IncludeFiles {
  704. rebaseName := options.RebaseNames[include]
  705. walkRoot := getWalkRoot(srcPath, include)
  706. filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
  707. if err != nil {
  708. logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
  709. return nil
  710. }
  711. relFilePath, err := filepath.Rel(srcPath, filePath)
  712. if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
  713. // Error getting relative path OR we are looking
  714. // at the source directory path. Skip in both situations.
  715. return nil
  716. }
  717. if options.IncludeSourceDir && include == "." && relFilePath != "." {
  718. relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
  719. }
  720. skip := false
  721. // If "include" is an exact match for the current file
  722. // then even if there's an "excludePatterns" pattern that
  723. // matches it, don't skip it. IOW, assume an explicit 'include'
  724. // is asking for that file no matter what - which is true
  725. // for some files, like .dockerignore and Dockerfile (sometimes)
  726. if include != relFilePath {
  727. skip, err = pm.Matches(relFilePath)
  728. if err != nil {
  729. logrus.Errorf("Error matching %s: %v", relFilePath, err)
  730. return err
  731. }
  732. }
  733. if skip {
  734. // If we want to skip this file and its a directory
  735. // then we should first check to see if there's an
  736. // excludes pattern (e.g. !dir/file) that starts with this
  737. // dir. If so then we can't skip this dir.
  738. // Its not a dir then so we can just return/skip.
  739. if !f.IsDir() {
  740. return nil
  741. }
  742. // No exceptions (!...) in patterns so just skip dir
  743. if !pm.Exclusions() {
  744. return filepath.SkipDir
  745. }
  746. dirSlash := relFilePath + string(filepath.Separator)
  747. for _, pat := range pm.Patterns() {
  748. if !pat.Exclusion() {
  749. continue
  750. }
  751. if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
  752. // found a match - so can't skip this dir
  753. return nil
  754. }
  755. }
  756. // No matching exclusion dir so just skip dir
  757. return filepath.SkipDir
  758. }
  759. if seen[relFilePath] {
  760. return nil
  761. }
  762. seen[relFilePath] = true
  763. // Rename the base resource.
  764. if rebaseName != "" {
  765. var replacement string
  766. if rebaseName != string(filepath.Separator) {
  767. // Special case the root directory to replace with an
  768. // empty string instead so that we don't end up with
  769. // double slashes in the paths.
  770. replacement = rebaseName
  771. }
  772. relFilePath = strings.Replace(relFilePath, include, replacement, 1)
  773. }
  774. if err := ta.addTarFile(filePath, relFilePath); err != nil {
  775. logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
  776. // if pipe is broken, stop writing tar stream to it
  777. if err == io.ErrClosedPipe {
  778. return err
  779. }
  780. }
  781. return nil
  782. })
  783. }
  784. }()
  785. return pipeReader, nil
  786. }
// Unpack unpacks the decompressedArchive to dest with options.
//
// The reader must already be decompressed (see Untar for the variant that
// auto-detects compression). Entries matching options.ExcludePatterns (by
// simple prefix) are skipped, UIDs/GIDs are remapped through the configured
// ID mappings, and directory mtimes are restored only after all entries have
// been written so child creation cannot disturb them.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
tr := tar.NewReader(decompressedArchive)
trBuf := pools.BufioReader32KPool.Get(nil)
defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header
idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
rootIDs := idMapping.RootPair()
whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
// Iterate through the files in the archive.
loop:
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return err
}
// Normalize name, for safety and for a simple is-root check
// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
// This keeps "..\" as-is, but normalizes "\..\" to "\".
hdr.Name = filepath.Clean(hdr.Name)
// Skip entries whose (cleaned) name begins with an exclude pattern.
// Note this is a plain prefix match, not a glob match.
for _, exclude := range options.ExcludePatterns {
if strings.HasPrefix(hdr.Name, exclude) {
continue loop
}
}
// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
// the filepath format for the OS on which the daemon is running. Hence
// the check for a slash-suffix MUST be done in an OS-agnostic way.
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
// Not the root directory, ensure that the parent directory exists
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
// 0777 here is pre-umask; ownership goes to the remapped root pair.
err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
if err != nil {
return err
}
}
}
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
if err != nil {
return err
}
// Reject entries that would escape dest (path traversal protection).
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
// If path exits we almost always just want to remove and replace it
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
// If NoOverwriteDirNonDir is true then we cannot replace
// an existing directory with a non-directory from the archive.
// NOTE(review): the second %q argument prints dest, not the
// archive entry name — looks like it was meant to be hdr.Name; confirm.
return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
}
if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
// If NoOverwriteDirNonDir is true then we cannot replace
// an existing non-directory with a directory from the archive.
return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
}
if fi.IsDir() && hdr.Name == "." {
continue
}
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return err
}
}
}
// Reuse the pooled buffered reader for this entry's content.
trBuf.Reset(tr)
if err := remapIDs(idMapping, hdr); err != nil {
return err
}
if whiteoutConverter != nil {
writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
if err != nil {
return err
}
// The converter may fully consume the entry (e.g. AUFS whiteouts);
// in that case there is nothing to write to disk.
if !writeFile {
continue
}
}
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
return err
}
// Directory mtimes must be handled at the end to avoid further
// file creation in them to modify the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
}
// Second pass: restore directory timestamps now that their contents exist.
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return err
}
}
return nil
}
  892. // Untar reads a stream of bytes from `archive`, parses it as a tar archive,
  893. // and unpacks it into the directory at `dest`.
  894. // The archive may be compressed with one of the following algorithms:
  895. // identity (uncompressed), gzip, bzip2, xz.
  896. // FIXME: specify behavior when target path exists vs. doesn't exist.
  897. func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
  898. return untarHandler(tarArchive, dest, options, true)
  899. }
  900. // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
  901. // and unpacks it into the directory at `dest`.
  902. // The archive must be an uncompressed stream.
  903. func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
  904. return untarHandler(tarArchive, dest, options, false)
  905. }
  906. // Handler for teasing out the automatic decompression
  907. func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
  908. if tarArchive == nil {
  909. return fmt.Errorf("Empty archive")
  910. }
  911. dest = filepath.Clean(dest)
  912. if options == nil {
  913. options = &TarOptions{}
  914. }
  915. if options.ExcludePatterns == nil {
  916. options.ExcludePatterns = []string{}
  917. }
  918. r := tarArchive
  919. if decompress {
  920. decompressedArchive, err := DecompressStream(tarArchive)
  921. if err != nil {
  922. return err
  923. }
  924. defer decompressedArchive.Close()
  925. r = decompressedArchive
  926. }
  927. return Unpack(r, dest, options)
  928. }
  929. // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
  930. // If either Tar or Untar fails, TarUntar aborts and returns the error.
  931. func (archiver *Archiver) TarUntar(src, dst string) error {
  932. logrus.Debugf("TarUntar(%s %s)", src, dst)
  933. archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
  934. if err != nil {
  935. return err
  936. }
  937. defer archive.Close()
  938. options := &TarOptions{
  939. UIDMaps: archiver.IDMapping.UIDs(),
  940. GIDMaps: archiver.IDMapping.GIDs(),
  941. }
  942. return archiver.Untar(archive, dst, options)
  943. }
  944. // UntarPath untar a file from path to a destination, src is the source tar file path.
  945. func (archiver *Archiver) UntarPath(src, dst string) error {
  946. archive, err := os.Open(src)
  947. if err != nil {
  948. return err
  949. }
  950. defer archive.Close()
  951. options := &TarOptions{
  952. UIDMaps: archiver.IDMapping.UIDs(),
  953. GIDMaps: archiver.IDMapping.GIDs(),
  954. }
  955. return archiver.Untar(archive, dst, options)
  956. }
  957. // CopyWithTar creates a tar archive of filesystem path `src`, and
  958. // unpacks it at filesystem path `dst`.
  959. // The archive is streamed directly with fixed buffering and no
  960. // intermediary disk IO.
  961. func (archiver *Archiver) CopyWithTar(src, dst string) error {
  962. srcSt, err := os.Stat(src)
  963. if err != nil {
  964. return err
  965. }
  966. if !srcSt.IsDir() {
  967. return archiver.CopyFileWithTar(src, dst)
  968. }
  969. // if this Archiver is set up with ID mapping we need to create
  970. // the new destination directory with the remapped root UID/GID pair
  971. // as owner
  972. rootIDs := archiver.IDMapping.RootPair()
  973. // Create dst, copy src's content into it
  974. logrus.Debugf("Creating dest directory: %s", dst)
  975. if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
  976. return err
  977. }
  978. logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
  979. return archiver.TarUntar(src, dst)
  980. }
  981. // CopyFileWithTar emulates the behavior of the 'cp' command-line
  982. // for a single file. It copies a regular file from path `src` to
  983. // path `dst`, and preserves all its metadata.
  984. func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
  985. logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
  986. srcSt, err := os.Stat(src)
  987. if err != nil {
  988. return err
  989. }
  990. if srcSt.IsDir() {
  991. return fmt.Errorf("Can't copy a directory")
  992. }
  993. // Clean up the trailing slash. This must be done in an operating
  994. // system specific manner.
  995. if dst[len(dst)-1] == os.PathSeparator {
  996. dst = filepath.Join(dst, filepath.Base(src))
  997. }
  998. // Create the holding directory if necessary
  999. if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil {
  1000. return err
  1001. }
  1002. r, w := io.Pipe()
  1003. errC := make(chan error, 1)
  1004. go func() {
  1005. defer close(errC)
  1006. errC <- func() error {
  1007. defer w.Close()
  1008. srcF, err := os.Open(src)
  1009. if err != nil {
  1010. return err
  1011. }
  1012. defer srcF.Close()
  1013. hdr, err := tar.FileInfoHeader(srcSt, "")
  1014. if err != nil {
  1015. return err
  1016. }
  1017. hdr.Format = tar.FormatPAX
  1018. hdr.ModTime = hdr.ModTime.Truncate(time.Second)
  1019. hdr.AccessTime = time.Time{}
  1020. hdr.ChangeTime = time.Time{}
  1021. hdr.Name = filepath.Base(dst)
  1022. hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
  1023. if err := remapIDs(archiver.IDMapping, hdr); err != nil {
  1024. return err
  1025. }
  1026. tw := tar.NewWriter(w)
  1027. defer tw.Close()
  1028. if err := tw.WriteHeader(hdr); err != nil {
  1029. return err
  1030. }
  1031. if _, err := io.Copy(tw, srcF); err != nil {
  1032. return err
  1033. }
  1034. return nil
  1035. }()
  1036. }()
  1037. defer func() {
  1038. if er := <-errC; err == nil && er != nil {
  1039. err = er
  1040. }
  1041. }()
  1042. err = archiver.Untar(r, filepath.Dir(dst), nil)
  1043. if err != nil {
  1044. r.CloseWithError(err)
  1045. }
  1046. return err
  1047. }
// IdentityMapping returns the IdentityMapping of the archiver.
// It exposes the mapping used for UID/GID remapping during tar/untar.
func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping {
return archiver.IDMapping
}
  1052. func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error {
  1053. ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid})
  1054. hdr.Uid, hdr.Gid = ids.UID, ids.GID
  1055. return err
  1056. }
  1057. // cmdStream executes a command, and returns its stdout as a stream.
  1058. // If the command fails to run or doesn't complete successfully, an error
  1059. // will be returned, including anything written on stderr.
  1060. func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
  1061. cmd.Stdin = input
  1062. pipeR, pipeW := io.Pipe()
  1063. cmd.Stdout = pipeW
  1064. var errBuf bytes.Buffer
  1065. cmd.Stderr = &errBuf
  1066. // Run the command and return the pipe
  1067. if err := cmd.Start(); err != nil {
  1068. return nil, err
  1069. }
  1070. // Copy stdout to the returned pipe
  1071. go func() {
  1072. if err := cmd.Wait(); err != nil {
  1073. pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
  1074. } else {
  1075. pipeW.Close()
  1076. }
  1077. }()
  1078. return pipeR, nil
  1079. }
  1080. // NewTempArchive reads the content of src into a temporary file, and returns the contents
  1081. // of that file as an archive. The archive can only be read once - as soon as reading completes,
  1082. // the file will be deleted.
  1083. func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
  1084. f, err := ioutil.TempFile(dir, "")
  1085. if err != nil {
  1086. return nil, err
  1087. }
  1088. if _, err := io.Copy(f, src); err != nil {
  1089. return nil, err
  1090. }
  1091. if _, err := f.Seek(0, 0); err != nil {
  1092. return nil, err
  1093. }
  1094. st, err := f.Stat()
  1095. if err != nil {
  1096. return nil, err
  1097. }
  1098. size := st.Size()
  1099. return &TempArchive{File: f, Size: size}, nil
  1100. }
// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
type TempArchive struct {
*os.File
Size int64 // Pre-computed from Stat().Size() as a convenience
read int64 // bytes consumed so far; compared against Size to detect completion
closed bool // set once Close has run, making further Close calls no-ops
}
  1109. // Close closes the underlying file if it's still open, or does a no-op
  1110. // to allow callers to try to close the TempArchive multiple times safely.
  1111. func (archive *TempArchive) Close() error {
  1112. if archive.closed {
  1113. return nil
  1114. }
  1115. archive.closed = true
  1116. return archive.File.Close()
  1117. }
  1118. func (archive *TempArchive) Read(data []byte) (int, error) {
  1119. n, err := archive.File.Read(data)
  1120. archive.read += int64(n)
  1121. if err != nil || archive.read == archive.Size {
  1122. archive.Close()
  1123. os.Remove(archive.File.Name())
  1124. }
  1125. return n, err
  1126. }