archive_unix.go

// +build !windows

package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"

import (
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/reexec"
)
// untar is the entry-point for docker-untar on re-exec. This is not used on
// Windows as it does not support chroot, hence no point sandboxing through
// chroot and re-exec.
func untar() {
	runtime.LockOSThread()
	flag.Parse()

	var options *archive.TarOptions

	// read the options from the pipe "ExtraFiles"
	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
		fatal(err)
	}

	dst := flag.Arg(0)
	var root string
	if len(flag.Args()) > 1 {
		root = flag.Arg(1)
	}

	if root == "" {
		root = dst
	}

	if err := chroot(root); err != nil {
		fatal(err)
	}

	if err := archive.Unpack(os.Stdin, dst, options); err != nil {
		fatal(err)
	}
	// fully consume stdin in case it is zero padded
	if _, err := flush(os.Stdin); err != nil {
		fatal(err)
	}

	os.Exit(0)
}
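// untar only runs when this process has been re-exec'ed under the name
// "docker-untar". A minimal sketch of how that name gets wired up, assuming
// the reexec package's Register/Init API; in the real package the
// registration lives in a separate init file, so the exact location is an
// assumption and reexec.Init() is expected to be called early in main:
//
//	func init() {
//		reexec.Register("docker-untar", untar)
//	}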
func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
	// We can't pass a potentially large exclude list directly via cmd line
	// because we easily overrun the kernel's max argument/environment size
	// when the full image list is passed (e.g. when this is used by
	// `docker load`). We will marshal the options via a pipe to the
	// child.
	r, w, err := os.Pipe()
	if err != nil {
		return fmt.Errorf("Untar pipe failure: %v", err)
	}
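	// The child chroots into root before unpacking, so dest must be
	// rewritten as a path inside that new root.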
	if root != "" {
		relDest, err := filepath.Rel(root, dest)
		if err != nil {
			return err
		}
		if relDest == "." {
			relDest = "/"
		}
		if relDest[0] != '/' {
			relDest = "/" + relDest
		}
		dest = relDest
	}

	cmd := reexec.Command("docker-untar", dest, root)
	cmd.Stdin = decompressedArchive
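	// The read end of the options pipe is handed to the child as the first
	// ExtraFiles entry, which the child sees as file descriptor 3
	// (os.NewFile(3, ...) in untar above).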
	cmd.ExtraFiles = append(cmd.ExtraFiles, r)
	output := bytes.NewBuffer(nil)
	cmd.Stdout = output
	cmd.Stderr = output

	if err := cmd.Start(); err != nil {
		w.Close()
		return fmt.Errorf("Untar error on re-exec cmd: %v", err)
	}

	// write the options to the pipe for the untar exec to read
	if err := json.NewEncoder(w).Encode(options); err != nil {
		w.Close()
		return fmt.Errorf("Untar json encode to pipe failed: %v", err)
	}
	w.Close()

	if err := cmd.Wait(); err != nil {
		// when `xz -d -c -q | docker-untar ...` failed on docker-untar side,
		// we need to exhaust `xz`'s output, otherwise the `xz` side will be
		// pending on write pipe forever
		io.Copy(ioutil.Discard, decompressedArchive)

		return fmt.Errorf("Error processing tar file(%v): %s", err, output)
	}
	return nil
}
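// A minimal sketch of how a caller might drive invokeUnpack, assuming the
// archive package's DecompressStream helper; the package's real exported
// wrapper lives in another file, and the function name below is purely
// illustrative:
//
//	func untarSketch(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
//		decompressed, err := archive.DecompressStream(tarArchive)
//		if err != nil {
//			return err
//		}
//		defer decompressed.Close()
//		// Passing dest as root mirrors the child's fallback in untar,
//		// where an empty root defaults to the destination directory.
//		return invokeUnpack(decompressed, dest, options, dest)
//	}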