
Merge pull request #44598 from cpuguy83/save_tar_oci

image save: make output tarball OCI compliant
Sebastiaan van Stijn 2 years ago
parent
commit
f139017bd0

+ 12 - 0
image/image.go

@@ -14,6 +14,7 @@ import (
 	"github.com/docker/docker/dockerversion"
 	"github.com/docker/docker/layer"
 	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 // ID is the content-addressable ID of an image.
@@ -174,6 +175,17 @@ func (img *Image) OperatingSystem() string {
 	return os
 }
 
+// Platform generates an OCI platform from the image
+func (img *Image) Platform() ocispec.Platform {
+	return ocispec.Platform{
+		Architecture: img.Architecture,
+		OS:           img.OS,
+		OSVersion:    img.OSVersion,
+		OSFeatures:   img.OSFeatures,
+		Variant:      img.Variant,
+	}
+}
+
 // MarshalJSON serializes the image to JSON. It sorts the top-level keys so
 // that JSON that's been manipulated by a push/pull cycle with a legacy
 // registry won't end up with a different key order.
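For context on the hunk above: the new `Platform()` accessor simply lifts the platform-related fields of the image config into an `ocispec.Platform`, which the save path attaches to the config descriptor. A minimal sketch of the resulting value; the concrete field values here are illustrative, not taken from this change:

```go
package main

import (
	"fmt"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Roughly what img.Platform() would return for a hypothetical linux/arm64/v8 image.
	p := ocispec.Platform{
		Architecture: "arm64",
		OS:           "linux",
		Variant:      "v8",
	}
	// The save code embeds a value like this as the Platform of the config descriptor.
	fmt.Printf("%s/%s/%s\n", p.OS, p.Architecture, p.Variant) // linux/arm64/v8
}
```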

+ 141 - 36
image/tarexport/save.go

@@ -9,6 +9,7 @@ import (
 	"path/filepath"
 	"time"
 
+	"github.com/containerd/containerd/images"
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/image"
@@ -18,12 +19,14 @@ import (
 	"github.com/docker/docker/pkg/system"
 	"github.com/moby/sys/sequential"
 	"github.com/opencontainers/go-digest"
+	"github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 )
 
 type imageDescriptor struct {
 	refs     []reference.NamedTagged
-	layers   []string
+	layers   []digest.Digest
 	image    *image.Image
 	layerRef layer.Layer
 }
@@ -190,32 +193,96 @@ func (s *saveSession) save(outStream io.Writer) error {
 	var manifest []manifestItem
 	var parentLinks []parentLink
 
+	var manifestDescriptors []ocispec.Descriptor
+
 	for id, imageDescr := range s.images {
 		foreignSrcs, err := s.saveImage(id)
 		if err != nil {
 			return err
 		}
 
-		var repoTags []string
-		var layers []string
+		var (
+			repoTags []string
+			layers   []string
+			foreign  = make([]ocispec.Descriptor, 0, len(foreignSrcs))
+		)
+
+		for _, desc := range foreignSrcs {
+			foreign = append(foreign, ocispec.Descriptor{
+				MediaType:   desc.MediaType,
+				Digest:      desc.Digest,
+				Size:        desc.Size,
+				URLs:        desc.URLs,
+				Annotations: desc.Annotations,
+				Platform:    desc.Platform,
+			})
+		}
+
+		imgPlat := imageDescr.image.Platform()
+
+		m := ocispec.Manifest{
+			Versioned: specs.Versioned{
+				SchemaVersion: 2,
+			},
+			MediaType: ocispec.MediaTypeImageManifest,
+			Config: ocispec.Descriptor{
+				MediaType: ocispec.MediaTypeImageConfig,
+				Digest:    digest.Digest(imageDescr.image.ID()),
+				Size:      int64(len(imageDescr.image.RawJSON())),
+				Platform:  &imgPlat,
+			},
+			Layers: foreign,
+		}
+
+		data, err := json.Marshal(m)
+		if err != nil {
+			return errors.Wrap(err, "error marshaling manifest")
+		}
+		dgst := digest.FromBytes(data)
+
+		mFile := filepath.Join(s.outDir, "blobs", dgst.Algorithm().String(), dgst.Encoded())
+		if err := os.MkdirAll(filepath.Dir(mFile), 0o755); err != nil {
+			return errors.Wrap(err, "error creating blob directory")
+		}
+		if err := system.Chtimes(filepath.Dir(mFile), time.Unix(0, 0), time.Unix(0, 0)); err != nil {
+			return errors.Wrap(err, "error setting blob directory timestamps")
+		}
+		if err := os.WriteFile(mFile, data, 0o644); err != nil {
+			return errors.Wrap(err, "error writing oci manifest file")
+		}
+		if err := system.Chtimes(mFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
+			return errors.Wrap(err, "error setting blob directory timestamps")
+		}
+		size := int64(len(data))
 
 		for _, ref := range imageDescr.refs {
 			familiarName := reference.FamiliarName(ref)
 			if _, ok := reposLegacy[familiarName]; !ok {
 				reposLegacy[familiarName] = make(map[string]string)
 			}
-			reposLegacy[familiarName][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1]
+			reposLegacy[familiarName][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1].Encoded()
 			repoTags = append(repoTags, reference.FamiliarString(ref))
+
+			manifestDescriptors = append(manifestDescriptors, ocispec.Descriptor{
+				MediaType: ocispec.MediaTypeImageManifest,
+				Digest:    dgst,
+				Size:      size,
+				Platform:  m.Config.Platform,
+				Annotations: map[string]string{
+					images.AnnotationImageName: ref.String(),
+					ocispec.AnnotationRefName:  ref.Tag(),
+				},
+			})
 		}
 
 		for _, l := range imageDescr.layers {
 			// IMPORTANT: We use path, not filepath here to ensure the layers
 			// in the manifest use Unix-style forward-slashes.
-			layers = append(layers, path.Join(l, legacyLayerFileName))
+			layers = append(layers, path.Join("blobs", l.Algorithm().String(), l.Encoded()))
 		}
 
 		manifest = append(manifest, manifestItem{
-			Config:       id.Digest().Encoded() + ".json",
+			Config:       path.Join("blobs", id.Digest().Algorithm().String(), id.Digest().Encoded()),
 			RepoTags:     repoTags,
 			Layers:       layers,
 			LayerSources: foreignSrcs,
@@ -251,8 +318,8 @@ func (s *saveSession) save(outStream io.Writer) error {
 		}
 	}
 
-	manifestFileName := filepath.Join(tempDir, manifestFileName)
-	f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+	manifestPath := filepath.Join(tempDir, manifestFileName)
+	f, err := os.OpenFile(manifestPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
 	if err != nil {
 		return err
 	}
@@ -264,10 +331,34 @@ func (s *saveSession) save(outStream io.Writer) error {
 
 	f.Close()
 
-	if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
+	if err := system.Chtimes(manifestPath, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
 		return err
 	}
 
+	layoutPath := filepath.Join(tempDir, ociLayoutFilename)
+	if err := os.WriteFile(layoutPath, []byte(ociLayoutContent), 0o644); err != nil {
+		return errors.Wrap(err, "error writing oci layout file")
+	}
+	if err := system.Chtimes(layoutPath, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
+		return errors.Wrap(err, "error setting oci layout file timestamps")
+	}
+
+	data, err := json.Marshal(ocispec.Index{
+		Versioned: specs.Versioned{
+			SchemaVersion: 2,
+		},
+		MediaType: ocispec.MediaTypeImageIndex,
+		Manifests: manifestDescriptors,
+	})
+	if err != nil {
+		return errors.Wrap(err, "error marshaling oci index")
+	}
+
+	idxFile := filepath.Join(s.outDir, ociIndexFileName)
+	if err := os.WriteFile(idxFile, data, 0o644); err != nil {
+		return errors.Wrap(err, "error writing oci index file")
+	}
+
 	fs, err := archive.Tar(tempDir, archive.Uncompressed)
 	if err != nil {
 		return err
@@ -285,9 +376,9 @@ func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Desc
 	}
 
 	var parent digest.Digest
-	var layers []string
+	var layers []digest.Digest
 	var foreignSrcs map[layer.DiffID]distribution.Descriptor
-	for i := range img.RootFS.DiffIDs {
+	for i, diffID := range img.RootFS.DiffIDs {
 		v1Img := image.V1Image{
 			// This is for backward compatibility used for
 			// pre v1.9 docker.
@@ -313,7 +404,8 @@ func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Desc
 		if err != nil {
 			return nil, err
 		}
-		layers = append(layers, v1Img.ID)
+
+		layers = append(layers, digest.Digest(diffID))
 		parent = v1ID
 		if src.Digest != "" {
 			if foreignSrcs == nil {
@@ -323,7 +415,21 @@ func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Desc
 		}
 	}
 
-	configFile := filepath.Join(s.outDir, id.Digest().Encoded()+".json")
+	data := img.RawJSON()
+	dgst := digest.FromBytes(data)
+
+	blobDir := filepath.Join(s.outDir, "blobs", dgst.Algorithm().String())
+	if err := os.MkdirAll(blobDir, 0o755); err != nil {
+		return nil, err
+	}
+	if err := system.Chtimes(blobDir, img.Created, img.Created); err != nil {
+		return nil, err
+	}
+	if err := system.Chtimes(filepath.Dir(blobDir), img.Created, img.Created); err != nil {
+		return nil, err
+	}
+
+	configFile := filepath.Join(blobDir, dgst.Encoded())
 	if err := os.WriteFile(configFile, img.RawJSON(), 0o644); err != nil {
 		return nil, err
 	}
@@ -340,47 +446,46 @@ func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, creat
 		return distribution.Descriptor{}, nil
 	}
 
-	outDir := filepath.Join(s.outDir, legacyImg.ID)
-	if err := os.Mkdir(outDir, 0755); err != nil {
-		return distribution.Descriptor{}, err
-	}
-
-	// todo: why is this version file here?
-	if err := os.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil {
-		return distribution.Descriptor{}, err
-	}
+	outDir := filepath.Join(s.outDir, "blobs")
 
 	imageConfig, err := json.Marshal(legacyImg)
 	if err != nil {
 		return distribution.Descriptor{}, err
 	}
 
-	if err := os.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil {
+	cfgDgst := digest.FromBytes(imageConfig)
+	configPath := filepath.Join(outDir, cfgDgst.Algorithm().String(), cfgDgst.Encoded())
+	if err := os.MkdirAll(filepath.Dir(configPath), 0755); err != nil {
+		return distribution.Descriptor{}, errors.Wrap(err, "could not create layer dir parent")
+	}
+
+	if err := os.WriteFile(configPath, imageConfig, 0644); err != nil {
 		return distribution.Descriptor{}, err
 	}
 
 	// serialize filesystem
-	layerPath := filepath.Join(outDir, legacyLayerFileName)
 	l, err := s.lss.Get(id)
 	if err != nil {
 		return distribution.Descriptor{}, err
 	}
+
+	lDgst := digest.Digest(l.DiffID())
+	layerPath := filepath.Join(outDir, lDgst.Algorithm().String(), lDgst.Encoded())
 	defer layer.ReleaseAndLog(s.lss, l)
 
-	if oldPath, exists := s.diffIDPaths[l.DiffID()]; exists {
-		relPath, err := filepath.Rel(outDir, oldPath)
-		if err != nil {
+	if _, err = os.Stat(layerPath); err != nil {
+		if !os.IsNotExist(err) {
 			return distribution.Descriptor{}, err
 		}
-		if err := os.Symlink(relPath, layerPath); err != nil {
-			return distribution.Descriptor{}, errors.Wrap(err, "error creating symlink while saving layer")
-		}
-	} else {
+
 		// We use sequential file access to avoid depleting the standby list on
 		// Windows. On Linux, this equates to a regular os.Create.
+		if err := os.MkdirAll(filepath.Dir(layerPath), 0755); err != nil {
+			return distribution.Descriptor{}, errors.Wrap(err, "could not create layer dir parent")
+		}
 		tarFile, err := sequential.Create(layerPath)
 		if err != nil {
-			return distribution.Descriptor{}, err
+			return distribution.Descriptor{}, errors.Wrap(err, "error creating layer file")
 		}
 		defer tarFile.Close()
 
@@ -394,16 +499,16 @@ func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, creat
 			return distribution.Descriptor{}, err
 		}
 
-		for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} {
+		for _, fname := range []string{outDir, configPath, layerPath} {
 			// todo: maybe save layer created timestamp?
-			if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil {
-				return distribution.Descriptor{}, err
+			if err := system.Chtimes(fname, createdTime, createdTime); err != nil {
+				return distribution.Descriptor{}, errors.Wrap(err, "could not set layer timestamp")
 			}
 		}
 
 		s.diffIDPaths[l.DiffID()] = layerPath
+		s.savedLayers[legacyImg.ID] = struct{}{}
 	}
-	s.savedLayers[legacyImg.ID] = struct{}{}
 
 	var src distribution.Descriptor
 	if fs, ok := l.(distribution.Describable); ok {
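Taken together, the hunks above switch `docker save` from per-layer directories containing `layer.tar` files to the content-addressed `blobs/<algorithm>/<encoded digest>` layout defined by the OCI image-layout spec, and they use it uniformly for the image config, the layer diffs, the legacy V1 configs, and the newly generated OCI manifests. A hedged sketch of that path convention, using only the go-digest API this file already imports; the helper name and sample content are assumptions for illustration:

```go
package main

import (
	"fmt"
	"path"

	"github.com/opencontainers/go-digest"
)

// blobPath mirrors the convention used throughout save.go:
// blobs/<algorithm>/<encoded digest>, with forward slashes inside the tar.
func blobPath(dgst digest.Digest) string {
	return path.Join("blobs", dgst.Algorithm().String(), dgst.Encoded())
}

func main() {
	dgst := digest.FromString("example blob contents") // illustrative content
	fmt.Println(blobPath(dgst))                        // e.g. blobs/sha256/<64 hex chars>
}
```

Because the descriptors written into `index.json` and `manifest.json` use the same digests, a consumer can resolve any referenced digest directly to a file inside the archive.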

+ 4 - 1
image/tarexport/tarexport.go

@@ -11,8 +11,11 @@ const (
 	manifestFileName           = "manifest.json"
 	legacyLayerFileName        = "layer.tar"
 	legacyConfigFileName       = "json"
-	legacyVersionFileName      = "VERSION"
 	legacyRepositoriesFileName = "repositories"
+
+	ociIndexFileName  = "index.json"
+	ociLayoutFilename = "oci-layout"
+	ociLayoutContent  = `{"imageLayoutVersion": "1.0.0"}`
 )
 
 type manifestItem struct {
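The new constants correspond to the OCI image-layout spec, which `ocispec` also exposes as `ImageLayoutFile`, `ImageLayoutVersion`, and the `ImageLayout` struct. A small sketch, under the assumption that the hand-written `ociLayoutContent` is meant to stay semantically equivalent to the spec's marker document:

```go
package main

import (
	"encoding/json"
	"fmt"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// ocispec.ImageLayoutFile == "oci-layout", ocispec.ImageLayoutVersion == "1.0.0".
	data, err := json.Marshal(ocispec.ImageLayout{Version: ocispec.ImageLayoutVersion})
	if err != nil {
		panic(err)
	}
	fmt.Println(ocispec.ImageLayoutFile, string(data)) // oci-layout {"imageLayoutVersion":"1.0.0"}
}
```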

+ 0 - 151
integration-cli/docker_cli_save_load_test.go

@@ -1,24 +1,15 @@
 package main
 
 import (
-	"archive/tar"
-	"encoding/json"
 	"fmt"
-	"io"
 	"os"
 	"os/exec"
 	"path/filepath"
-	"reflect"
-	"regexp"
-	"sort"
 	"strings"
 	"testing"
-	"time"
 
 	"github.com/docker/docker/integration-cli/cli/build"
-	"github.com/opencontainers/go-digest"
 	"gotest.tools/v3/assert"
-	is "gotest.tools/v3/assert/cmp"
 	"gotest.tools/v3/icmd"
 )
 
@@ -108,25 +99,6 @@ func (s *DockerCLISaveLoadSuite) TestSaveSingleTag(c *testing.T) {
 	assert.NilError(c, err, "failed to save repo with image ID and 'repositories' file: %s, %v", out, err)
 }
 
-func (s *DockerCLISaveLoadSuite) TestSaveCheckTimes(c *testing.T) {
-	testRequires(c, DaemonIsLinux)
-	repoName := "busybox:latest"
-	out, _ := dockerCmd(c, "inspect", repoName)
-	var data []struct {
-		ID      string
-		Created time.Time
-	}
-	err := json.Unmarshal([]byte(out), &data)
-	assert.NilError(c, err, "failed to marshal from %q: err %v", repoName, err)
-	assert.Assert(c, len(data) != 0, "failed to marshal the data from %q", repoName)
-	tarTvTimeFormat := "2006-01-02 15:04"
-	out, err = RunCommandPipelineWithOutput(
-		exec.Command(dockerBinary, "save", repoName),
-		exec.Command("tar", "tv"),
-		exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Encoded())))
-	assert.NilError(c, err, "failed to save repo with image ID and 'repositories' file: %s, %v", out, err)
-}
-
 func (s *DockerCLISaveLoadSuite) TestSaveImageId(c *testing.T) {
 	testRequires(c, DaemonIsLinux)
 	repoName := "foobar-save-image-id-test"
@@ -215,129 +187,6 @@ func (s *DockerCLISaveLoadSuite) TestSaveMultipleNames(c *testing.T) {
 	assert.NilError(c, err, "failed to save multiple repos: %s, %v", out, err)
 }
 
-func (s *DockerCLISaveLoadSuite) TestSaveRepoWithMultipleImages(c *testing.T) {
-	testRequires(c, DaemonIsLinux)
-	makeImage := func(from string, tag string) string {
-		var (
-			out string
-		)
-		out, _ = dockerCmd(c, "run", "-d", from, "true")
-		cleanedContainerID := strings.TrimSpace(out)
-
-		out, _ = dockerCmd(c, "commit", cleanedContainerID, tag)
-		imageID := strings.TrimSpace(out)
-		return imageID
-	}
-
-	repoName := "foobar-save-multi-images-test"
-	tagFoo := repoName + ":foo"
-	tagBar := repoName + ":bar"
-
-	idFoo := makeImage("busybox:latest", tagFoo)
-	idBar := makeImage("busybox:latest", tagBar)
-
-	deleteImages(repoName)
-
-	// create the archive
-	out, err := RunCommandPipelineWithOutput(
-		exec.Command(dockerBinary, "save", repoName, "busybox:latest"),
-		exec.Command("tar", "t"))
-	assert.NilError(c, err, "failed to save multiple images: %s, %v", out, err)
-
-	lines := strings.Split(strings.TrimSpace(out), "\n")
-	var actual []string
-	for _, l := range lines {
-		if regexp.MustCompile(`^[a-f0-9]{64}\.json$`).Match([]byte(l)) {
-			actual = append(actual, strings.TrimSuffix(l, ".json"))
-		}
-	}
-
-	// make the list of expected layers
-	out = inspectField(c, "busybox:latest", "Id")
-	expected := []string{strings.TrimSpace(out), idFoo, idBar}
-
-	// prefixes are not in tar
-	for i := range expected {
-		expected[i] = digest.Digest(expected[i]).Encoded()
-	}
-
-	sort.Strings(actual)
-	sort.Strings(expected)
-	assert.Assert(c, is.DeepEqual(actual, expected), "archive does not contains the right layers: got %v, expected %v, output: %q", actual, expected, out)
-}
-
-// Issue #6722 #5892 ensure directories are included in changes
-func (s *DockerCLISaveLoadSuite) TestSaveDirectoryPermissions(c *testing.T) {
-	testRequires(c, DaemonIsLinux)
-	layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"}
-	layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"}
-
-	name := "save-directory-permissions"
-	tmpDir, err := os.MkdirTemp("", "save-layers-with-directories")
-	assert.Assert(c, err == nil, "failed to create temporary directory: %s", err)
-	extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir")
-	os.Mkdir(extractionDirectory, 0777)
-
-	defer os.RemoveAll(tmpDir)
-	buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
-	RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a
-	RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`))
-
-	out, err := RunCommandPipelineWithOutput(
-		exec.Command(dockerBinary, "save", name),
-		exec.Command("tar", "-xf", "-", "-C", extractionDirectory),
-	)
-	assert.NilError(c, err, "failed to save and extract image: %s", out)
-
-	dirs, err := os.ReadDir(extractionDirectory)
-	assert.NilError(c, err, "failed to get a listing of the layer directories: %s", err)
-
-	found := false
-	for _, entry := range dirs {
-		var entriesSansDev []string
-		if entry.IsDir() {
-			layerPath := filepath.Join(extractionDirectory, entry.Name(), "layer.tar")
-
-			f, err := os.Open(layerPath)
-			assert.NilError(c, err, "failed to open %s: %s", layerPath, err)
-
-			defer f.Close()
-
-			entries, err := listTar(f)
-			for _, e := range entries {
-				if !strings.Contains(e, "dev/") {
-					entriesSansDev = append(entriesSansDev, e)
-				}
-			}
-			assert.NilError(c, err, "encountered error while listing tar entries: %s", err)
-
-			if reflect.DeepEqual(entriesSansDev, layerEntries) || reflect.DeepEqual(entriesSansDev, layerEntriesAUFS) {
-				found = true
-				break
-			}
-		}
-	}
-
-	assert.Assert(c, found, "failed to find the layer with the right content listing")
-}
-
-func listTar(f io.Reader) ([]string, error) {
-	tr := tar.NewReader(f)
-	var entries []string
-
-	for {
-		th, err := tr.Next()
-		if err == io.EOF {
-			// end of tar archive
-			return entries, nil
-		}
-		if err != nil {
-			return entries, err
-		}
-		entries = append(entries, th.Name)
-	}
-}
-
 // Test loading a weird image where one of the layers is of zero size.
 // The layer.tar file is actually zero bytes, no padding or anything else.
 // See issue: 18170

+ 242 - 0
integration/image/save_test.go

@@ -0,0 +1,242 @@
+package image
+
+import (
+	"archive/tar"
+	"context"
+	"encoding/json"
+	"io"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/cpuguy83/tar2go"
+	"github.com/docker/docker/api/types"
+	containerapi "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/integration/internal/build"
+	"github.com/docker/docker/integration/internal/container"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/testutil/fakecontext"
+	"github.com/opencontainers/go-digest"
+	"gotest.tools/v3/assert"
+	"gotest.tools/v3/assert/cmp"
+	"gotest.tools/v3/skip"
+)
+
+type imageSaveManifestEntry struct {
+	Config   string
+	RepoTags []string
+	Layers   []string
+}
+
+func tarIndexFS(t *testing.T, rdr io.Reader) fs.FS {
+	t.Helper()
+
+	dir := t.TempDir()
+
+	f, err := os.Create(filepath.Join(dir, "image.tar"))
+	assert.NilError(t, err)
+
+	// Do not close at the end of this function otherwise the indexer won't work
+	t.Cleanup(func() { f.Close() })
+
+	_, err = io.Copy(f, rdr)
+	assert.NilError(t, err)
+
+	return tar2go.NewIndex(f).FS()
+}
+
+func TestSaveCheckTimes(t *testing.T) {
+	t.Parallel()
+
+	defer setupTest(t)()
+	client := testEnv.APIClient()
+	ctx := context.Background()
+
+	const repoName = "busybox:latest"
+	img, _, err := client.ImageInspectWithRaw(ctx, repoName)
+	assert.NilError(t, err)
+
+	rdr, err := client.ImageSave(ctx, []string{repoName})
+	assert.NilError(t, err)
+
+	tarfs := tarIndexFS(t, rdr)
+
+	dt, err := fs.ReadFile(tarfs, "manifest.json")
+	assert.NilError(t, err)
+
+	var ls []imageSaveManifestEntry
+	assert.NilError(t, json.Unmarshal(dt, &ls))
+	assert.Assert(t, cmp.Len(ls, 1))
+
+	info, err := fs.Stat(tarfs, ls[0].Config)
+	assert.NilError(t, err)
+
+	created, err := time.Parse(time.RFC3339, img.Created)
+	assert.NilError(t, err)
+
+	assert.Equal(t, created.Format(time.RFC3339), info.ModTime().Format(time.RFC3339), "expected: %s, actual: %s", created, info.ModTime())
+}
+
+func TestSaveRepoWithMultipleImages(t *testing.T) {
+	defer setupTest(t)()
+	ctx := context.Background()
+	client := testEnv.APIClient()
+
+	makeImage := func(from string, tag string) string {
+		id := container.Run(ctx, t, client, func(cfg *container.TestContainerConfig) {
+			cfg.Config.Image = from
+			cfg.Config.Cmd = []string{"true"}
+		})
+
+		chW, chErr := client.ContainerWait(ctx, id, containerapi.WaitConditionNotRunning)
+
+		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+		defer cancel()
+
+		select {
+		case <-chW:
+		case err := <-chErr:
+			assert.NilError(t, err)
+		case <-ctx.Done():
+			t.Fatal("timeout waiting for container to exit")
+		}
+
+		res, err := client.ContainerCommit(ctx, id, types.ContainerCommitOptions{Reference: tag})
+		assert.NilError(t, err)
+
+		err = client.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true})
+		assert.NilError(t, err)
+
+		return res.ID
+	}
+
+	repoName := "foobar-save-multi-images-test"
+	tagFoo := repoName + ":foo"
+	tagBar := repoName + ":bar"
+
+	idFoo := makeImage("busybox:latest", tagFoo)
+	idBar := makeImage("busybox:latest", tagBar)
+
+	client.ImageRemove(ctx, repoName, types.ImageRemoveOptions{Force: true})
+
+	rdr, err := client.ImageSave(ctx, []string{repoName, "busybox:latest"})
+	assert.NilError(t, err)
+	defer rdr.Close()
+
+	tarfs := tarIndexFS(t, rdr)
+
+	dt, err := fs.ReadFile(tarfs, "manifest.json")
+	assert.NilError(t, err)
+
+	var mfstLs []imageSaveManifestEntry
+	assert.NilError(t, json.Unmarshal(dt, &mfstLs))
+
+	actual := make([]string, 0, len(mfstLs))
+	for _, m := range mfstLs {
+		actual = append(actual, strings.TrimPrefix(m.Config, "blobs/sha256/"))
+		// make sure the blob actually exists
+		_, err := fs.Stat(tarfs, m.Config)
+		assert.Check(t, cmp.Nil(err))
+	}
+
+	// make the list of expected layers
+	img, _, err := client.ImageInspectWithRaw(ctx, "busybox:latest")
+	assert.NilError(t, err)
+
+	expected := []string{img.ID, idFoo, idBar}
+
+	// prefixes are not in tar
+	for i := range expected {
+		expected[i] = digest.Digest(expected[i]).Encoded()
+	}
+
+	sort.Strings(actual)
+	sort.Strings(expected)
+	assert.Assert(t, cmp.DeepEqual(actual, expected), "archive does not contains the right layers: got %v, expected %v", actual, expected)
+}
+
+func TestSaveDirectoryPermissions(t *testing.T) {
+	skip.If(t, testEnv.OSType == "windows", "Test is looking at linux specific details")
+
+	defer setupTest(t)()
+
+	ctx := context.Background()
+	client := testEnv.APIClient()
+
+	layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"}
+	layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"}
+
+	dockerfile := `FROM busybox
+RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a
+RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`
+
+	imgID := build.Do(ctx, t, client, fakecontext.New(t, t.TempDir(), fakecontext.WithDockerfile(dockerfile)))
+
+	rdr, err := client.ImageSave(ctx, []string{imgID})
+	assert.NilError(t, err)
+	defer rdr.Close()
+
+	tarfs := tarIndexFS(t, rdr)
+
+	dt, err := fs.ReadFile(tarfs, "manifest.json")
+	assert.NilError(t, err)
+
+	var mfstLs []imageSaveManifestEntry
+	assert.NilError(t, json.Unmarshal(dt, &mfstLs))
+
+	var found bool
+
+	for _, p := range mfstLs[0].Layers {
+		var entriesSansDev []string
+
+		f, err := tarfs.Open(p)
+		assert.NilError(t, err)
+
+		entries, err := listTar(f)
+		f.Close()
+		assert.NilError(t, err)
+
+		for _, e := range entries {
+			if !strings.Contains(e, "dev/") {
+				entriesSansDev = append(entriesSansDev, e)
+			}
+		}
+		assert.NilError(t, err, "encountered error while listing tar entries: %s", err)
+
+		if reflect.DeepEqual(entriesSansDev, layerEntries) || reflect.DeepEqual(entriesSansDev, layerEntriesAUFS) {
+			found = true
+			break
+		}
+	}
+
+	assert.Assert(t, found, "failed to find the layer with the right content listing")
+}
+
+func listTar(f io.Reader) ([]string, error) {
+	// If using the containerd snapshotter, the tar file may be compressed
+	dec, err := archive.DecompressStream(f)
+	if err != nil {
+		return nil, err
+	}
+	defer dec.Close()
+
+	tr := tar.NewReader(dec)
+	var entries []string
+
+	for {
+		th, err := tr.Next()
+		if err == io.EOF {
+			// end of tar archive
+			return entries, nil
+		}
+		if err != nil {
+			return entries, err
+		}
+		entries = append(entries, th.Name)
+	}
+}
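Beyond `manifest.json`, the saved tarball now also carries a top-level OCI `index.json`. A hedged sketch of how the same tar2go-backed `fs.FS` used by these tests could decode it; the helper below is an assumption for illustration, not part of the test file above:

```go
package image

import (
	"encoding/json"
	"io/fs"
	"testing"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"gotest.tools/v3/assert"
)

// readOCIIndex is a hypothetical helper: it reads the index.json written at
// the root of the saved tarball and decodes it into an OCI index.
func readOCIIndex(t *testing.T, tarfs fs.FS) ocispec.Index {
	t.Helper()

	data, err := fs.ReadFile(tarfs, "index.json")
	assert.NilError(t, err)

	var idx ocispec.Index
	assert.NilError(t, json.Unmarshal(data, &idx))
	return idx
}
```

Each descriptor in `idx.Manifests` should carry the full reference under the containerd image-name annotation and the tag under `org.opencontainers.image.ref.name`, matching what the save path writes above.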

+ 1 - 0
vendor.mod

@@ -31,6 +31,7 @@ require (
 	github.com/containerd/fifo v1.1.0
 	github.com/containerd/typeurl/v2 v2.1.0
 	github.com/coreos/go-systemd/v22 v22.5.0
+	github.com/cpuguy83/tar2go v0.3.1
 	github.com/creack/pty v1.1.18
 	github.com/deckarep/golang-set/v2 v2.3.0
 	github.com/docker/distribution v2.8.2+incompatible

+ 2 - 0
vendor.sum

@@ -474,6 +474,8 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/tar2go v0.3.1 h1:DMWlaIyoh9FBWR4hyfZSOEDA7z8rmCiGF1IJIzlTlR8=
+github.com/cpuguy83/tar2go v0.3.1/go.mod h1:2Ys2/Hu+iPHQRa4DjIVJ7UAaKnDhAhNACeK3A0Rr5rM=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=

+ 21 - 0
vendor/github.com/cpuguy83/tar2go/LICENSE.md

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Brian Goff
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 49 - 0
vendor/github.com/cpuguy83/tar2go/README.md

@@ -0,0 +1,49 @@
+# tar2go
+
+tar2go implements a Go [fs.FS](https://pkg.go.dev/io/fs#FS) for tar files.
+
+Tars are not indexed so by themselves don't really have support for random access.
+When a request to open/stat a file is made tar2go will scan through the tar, indexing each entry along the way, until the file is found in the tar.
+A tar file is only ever scanned 1 time and scanning is done lazily (as needed to index the requested entry).
+
+tar2go does not support modifying a tar file, however there is support for modifying the in-memory representation of the tar which will show up in the `fs.FS`.
+You can also write a new tar file with requested modifications.
+
+### Usage
+
+```go
+  f, _ := os.Open(p)
+  defer f.Close()
+  
+  // Entrypoint into this library
+  idx := NewIndex(f)
+  
+  // Get the `fs.FS` implementation
+  goFS := idx.FS()
+  // Do stuff with your fs
+  // ...
+  
+  
+  // Add or replace a file in the index
+  _ = idx.Replace("foo", strings.NewReader("random stuff"))
+  data, _ := fs.ReadFile(goFS, "foo")
+  if string(data) != "random stuff" {
+    panic("unexpected data")
+  }
+  
+  // Delete a file in the index
+  _ = idx.Replace("foo", nil)
+  if _, err := fs.ReadFile(goFS, "foo"); !errors.Is(err, fs.ErrNotExist) {
+    panic(err)
+  }
+  
+  // Create a new tar with updated content
+  // First we need to create an `io.Writer`, which is where the updated tar stream will be written to.
+  out, _ := os.CreateTemp("", "updated")
+  idx.Update(out, func(name string, rdr ReaderAtSized) (ReaderAtSized, bool, error) {
+    // Update calls this function for every file in the tar.
+    // The returned `ReaderAtSized` is used instead of the content passed in (rdr).
+    // To make no changes just return the same rdr back.
+    // Return true for the bool value if the content is changed.
+    return rdr, false, nil
+  })
+```
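Since the README fragment above elides error handling, here is a self-contained sketch that exercises only the vendored API added in this PR (`NewIndex` and `Index.FS`); the in-memory tar and file name are illustrative:

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io/fs"

	"github.com/cpuguy83/tar2go"
)

func main() {
	// Build a one-file tar in memory (purely illustrative content).
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	content := []byte("hello")
	if err := tw.WriteHeader(&tar.Header{Name: "greeting.txt", Mode: 0o644, Size: int64(len(content))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(content); err != nil {
		panic(err)
	}
	tw.Close()

	// Index the tar lazily and read the file back through the fs.FS view.
	idx := tar2go.NewIndex(bytes.NewReader(buf.Bytes()))
	data, err := fs.ReadFile(idx.FS(), "greeting.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // hello
}
```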

+ 66 - 0
vendor/github.com/cpuguy83/tar2go/file.go

@@ -0,0 +1,66 @@
+package tar2go
+
+import (
+	"archive/tar"
+	"io"
+	"io/fs"
+	"time"
+)
+
+type file struct {
+	idx *indexReader
+	rdr *io.SectionReader
+}
+
+func newFile(idx *indexReader) *file {
+	return &file{idx: idx, rdr: io.NewSectionReader(idx.rdr, idx.offset, idx.size)}
+}
+
+type fileinfo struct {
+	h *tar.Header
+}
+
+func (f *fileinfo) Name() string {
+	return f.h.Name
+}
+
+func (f *fileinfo) Size() int64 {
+	return f.h.Size
+}
+
+func (f *fileinfo) Mode() fs.FileMode {
+	return fs.FileMode(f.h.Mode)
+}
+
+func (f *fileinfo) ModTime() time.Time {
+	return f.h.ModTime
+}
+
+func (f *fileinfo) IsDir() bool {
+	return f.h.Typeflag == tar.TypeDir
+}
+
+func (f *file) Close() error {
+	return nil
+}
+
+func (f *fileinfo) Sys() interface{} {
+	h := *f.h
+	return &h
+}
+
+func (f *file) Read(p []byte) (int, error) {
+	return f.rdr.Read(p)
+}
+
+func (f *file) ReadAt(p []byte, off int64) (int, error) {
+	return f.rdr.ReadAt(p, off)
+}
+
+func (f *file) Size() int64 {
+	return f.rdr.Size()
+}
+
+func (f *file) Stat() (fs.FileInfo, error) {
+	return &fileinfo{h: f.idx.hdr}, nil
+}

+ 30 - 0
vendor/github.com/cpuguy83/tar2go/fs.go

@@ -0,0 +1,30 @@
+package tar2go
+
+import (
+	"io/fs"
+)
+
+var (
+	_ fs.FS   = &filesystem{}
+	_ fs.File = &file{}
+)
+
+type filesystem struct {
+	idx *Index
+}
+
+func (f *filesystem) Open(name string) (fs.File, error) {
+	idx, err := f.idx.indexWithLock(name)
+	if err != nil {
+		return nil, &fs.PathError{Path: name, Op: "open", Err: err}
+	}
+	return newFile(idx), nil
+}
+
+func (f *filesystem) Stat(name string) (fs.FileInfo, error) {
+	idx, err := f.idx.indexWithLock(name)
+	if err != nil {
+		return nil, &fs.PathError{Path: name, Op: "stat", Err: err}
+	}
+	return &fileinfo{h: idx.hdr}, nil
+}

+ 190 - 0
vendor/github.com/cpuguy83/tar2go/index.go

@@ -0,0 +1,190 @@
+package tar2go
+
+import (
+	"archive/tar"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"sync"
+)
+
+var (
+	// ErrDelete should be returned by an UpdaterFn when the file should be deleted.
+	ErrDelete = errors.New("delete")
+)
+
+// Index is a tar index that can be used to read files from a tar.
+type Index struct {
+	rdr *io.SectionReader
+	tar *tar.Reader
+	mu  sync.Mutex
+	idx map[string]*indexReader
+}
+
+// NewIndex creates a new Index from the passed in io.ReaderAt.
+func NewIndex(rdr io.ReaderAt) *Index {
+	ras, ok := rdr.(ReaderAtSized)
+	var size int64
+	if !ok {
+		size = 1<<63 - 1
+	} else {
+		size = ras.Size()
+	}
+	return &Index{
+		rdr: io.NewSectionReader(rdr, 0, size),
+		idx: make(map[string]*indexReader),
+	}
+}
+
+func (i *Index) indexWithLock(name string) (*indexReader, error) {
+	i.mu.Lock()
+	defer i.mu.Unlock()
+	return i.index(name)
+}
+
+func filterFSPrefix(name string) string {
+	if len(name) <= 1 {
+		return name
+	}
+	if name[0] == '/' {
+		return name[1:]
+	}
+	if len(name) > 2 && name[0] == '.' && name[1] == '/' {
+		return name[2:]
+	}
+	return name
+}
+
+// This function must be called with the lock held.
+func (i *Index) index(name string) (*indexReader, error) {
+	name = filterFSPrefix(name)
+	if rdr, ok := i.idx[name]; ok {
+		return rdr, nil
+	}
+
+	if i.tar == nil {
+		i.tar = tar.NewReader(i.rdr)
+	}
+
+	for {
+		hdr, err := i.tar.Next()
+		if err != nil {
+			if err == io.EOF {
+				return nil, fs.ErrNotExist
+			}
+			return nil, fmt.Errorf("error indexing tar: %w", err)
+		}
+
+		pos, err := i.rdr.Seek(0, io.SeekCurrent)
+		if err != nil {
+			return nil, fmt.Errorf("error getting file offset: %w", err)
+		}
+		rdr := &indexReader{rdr: i.rdr, offset: pos, size: hdr.Size, hdr: hdr}
+		hdrName := filterFSPrefix(hdr.Name)
+		i.idx[hdrName] = rdr
+
+		if hdrName == name {
+			return rdr, nil
+		}
+	}
+}
+
+// Reader returns an io.ReaderAt that can be used to read the entire tar.
+func (i *Index) Reader() *io.SectionReader {
+	return io.NewSectionReader(i.rdr, 0, i.rdr.Size())
+}
+
+// FS returns an fs.FS that can be used to read the files in the tar.
+func (i *Index) FS() fs.FS {
+	return &filesystem{idx: i}
+}
+
+// ReaderAtSized is an io.ReaderAt that also implements a Size method.
+type ReaderAtSized interface {
+	io.ReaderAt
+	Size() int64
+}
+
+// UpdaterFn is a function that is passed the name of the file and a ReaderAtSized
+type UpdaterFn func(string, ReaderAtSized) (ReaderAtSized, bool, error)
+
+// Replace replaces the file with the passed in name with the passed in ReaderAtSized.
+// If the passed in ReaderAtSized is nil, the file will be deleted.
+// If the file does not exist, it will be added.
+//
+// This function does not update the actual tar file, it only updates the index.
+func (i *Index) Replace(name string, rdr ReaderAtSized) error {
+	i.mu.Lock()
+	defer i.mu.Unlock()
+
+	// Index the original entry first, otherwise a later lazy index scan could overwrite this replacement.
+	i.index(name)
+
+	if rdr == nil {
+		delete(i.idx, name)
+		return nil
+	}
+
+	i.idx[name] = &indexReader{rdr: rdr, offset: 0, size: rdr.Size(), hdr: &tar.Header{
+		Name: name,
+		Size: rdr.Size(),
+	}}
+	return nil
+}
+
+// Update creates a new tar with the files updated by the passed in updater function.
+// The output tar is written to the passed in io.Writer
+func (i *Index) Update(w io.Writer, updater UpdaterFn) error {
+	tw := tar.NewWriter(w)
+	defer tw.Close()
+
+	rdr := i.Reader()
+	tr := tar.NewReader(rdr)
+
+	for {
+		hdr, err := tr.Next()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return fmt.Errorf("error reading tar: %w", err)
+		}
+
+		offset, err := rdr.Seek(0, io.SeekCurrent)
+		if err != nil {
+			return fmt.Errorf("error getting file offset: %w", err)
+		}
+
+		ra, updated, err := updater(hdr.Name, io.NewSectionReader(i.rdr, offset, hdr.Size))
+		if err != nil {
+			if err == ErrDelete {
+				continue
+			}
+			return fmt.Errorf("error updating file %s: %w", hdr.Name, err)
+		}
+
+		if updated {
+			hdr.Size = ra.Size()
+		}
+
+		if err := tw.WriteHeader(hdr); err != nil {
+			return fmt.Errorf("error writing tar header: %w", err)
+		}
+
+		if _, err := io.Copy(tw, io.NewSectionReader(ra, 0, ra.Size())); err != nil {
+			return fmt.Errorf("error writing tar file: %w", err)
+		}
+	}
+}
+
+type indexReader struct {
+	rdr    io.ReaderAt
+	offset int64
+	size   int64
+	hdr    *tar.Header
+}
+
+func (r *indexReader) Reader() *io.SectionReader {
+	return io.NewSectionReader(r.rdr, r.offset, r.size)
+}

+ 3 - 0
vendor/modules.txt

@@ -358,6 +358,9 @@ github.com/coreos/go-systemd/v22/activation
 github.com/coreos/go-systemd/v22/daemon
 github.com/coreos/go-systemd/v22/dbus
 github.com/coreos/go-systemd/v22/journal
+# github.com/cpuguy83/tar2go v0.3.1
+## explicit; go 1.19
+github.com/cpuguy83/tar2go
 # github.com/creack/pty v1.1.18
 ## explicit; go 1.13
 github.com/creack/pty