integration/save: Add tests checking OCI archive output
Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
(cherry picked from commit 2ef0b53e51)
Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
This commit is contained in:
parent
225e043196
commit
147b5388dd
4 changed files with 338 additions and 26 deletions
|
@ -18,6 +18,8 @@ import (
|
|||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/integration/internal/build"
|
||||
"github.com/docker/docker/integration/internal/container"
|
||||
"github.com/docker/docker/internal/testutils"
|
||||
"github.com/docker/docker/internal/testutils/specialimage"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/testutil/fakecontext"
|
||||
"github.com/opencontainers/go-digest"
|
||||
|
@ -88,45 +90,126 @@ func TestSaveCheckTimes(t *testing.T) {
|
|||
}
|
||||
|
||||
// Regression test for https://github.com/moby/moby/issues/47065
|
||||
func TestSaveCheckManifestLayers(t *testing.T) {
|
||||
func TestSaveOCI(t *testing.T) {
|
||||
skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.44"), "OCI layout support was introduced in v25")
|
||||
|
||||
ctx := setupTest(t)
|
||||
client := testEnv.APIClient()
|
||||
|
||||
t.Parallel()
|
||||
|
||||
const repoName = "busybox:latest"
|
||||
img, _, err := client.ImageInspectWithRaw(ctx, repoName)
|
||||
const busybox = "busybox:latest"
|
||||
inspectBusybox, _, err := client.ImageInspectWithRaw(ctx, busybox)
|
||||
assert.NilError(t, err)
|
||||
|
||||
rdr, err := client.ImageSave(ctx, []string{repoName})
|
||||
assert.NilError(t, err)
|
||||
type testCase struct {
|
||||
image string
|
||||
expectedOCIRef string
|
||||
expectedContainerdRef string
|
||||
}
|
||||
|
||||
tarfs := tarIndexFS(t, rdr)
|
||||
testCases := []testCase{
|
||||
// Busybox by tagged name
|
||||
testCase{image: busybox, expectedContainerdRef: "docker.io/library/busybox:latest", expectedOCIRef: "latest"},
|
||||
|
||||
indexData, err := fs.ReadFile(tarfs, "index.json")
|
||||
assert.NilError(t, err)
|
||||
// Busybox by ID
|
||||
testCase{image: inspectBusybox.ID},
|
||||
}
|
||||
|
||||
var index ocispec.Index
|
||||
assert.NilError(t, json.Unmarshal(indexData, &index))
|
||||
if testEnv.DaemonInfo.OSType != "windows" {
|
||||
multiLayerImage := specialimage.Load(ctx, t, client, specialimage.MultiLayer)
|
||||
// Multi-layer image
|
||||
testCases = append(testCases, testCase{image: multiLayerImage, expectedContainerdRef: "docker.io/library/multilayer:latest", expectedOCIRef: "latest"})
|
||||
|
||||
assert.Assert(t, is.Len(index.Manifests, 1))
|
||||
}
|
||||
|
||||
manifestData, err := fs.ReadFile(tarfs, "blobs/sha256/"+index.Manifests[0].Digest.Encoded())
|
||||
assert.NilError(t, err)
|
||||
// Busybox frozen image will have empty RepoDigests when loaded into the
|
||||
// graphdriver image store so we can't use it.
|
||||
// This will work with the containerd image store though.
|
||||
if len(inspectBusybox.RepoDigests) > 0 {
|
||||
// Digested reference
|
||||
testCases = append(testCases, testCase{
|
||||
image: inspectBusybox.RepoDigests[0],
|
||||
})
|
||||
}
|
||||
|
||||
var manifest ocispec.Manifest
|
||||
assert.NilError(t, json.Unmarshal(manifestData, &manifest))
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.image, func(t *testing.T) {
|
||||
// Get information about the original image.
|
||||
inspect, _, err := client.ImageInspectWithRaw(ctx, tc.image)
|
||||
assert.NilError(t, err)
|
||||
|
||||
assert.Check(t, is.Len(manifest.Layers, len(img.RootFS.Layers)))
|
||||
for _, l := range manifest.Layers {
|
||||
stat, err := fs.Stat(tarfs, "blobs/sha256/"+l.Digest.Encoded())
|
||||
if !assert.Check(t, err) {
|
||||
continue
|
||||
}
|
||||
rdr, err := client.ImageSave(ctx, []string{tc.image})
|
||||
assert.NilError(t, err)
|
||||
defer rdr.Close()
|
||||
|
||||
assert.Check(t, is.Equal(l.Size, stat.Size()))
|
||||
tarfs := tarIndexFS(t, rdr)
|
||||
|
||||
indexData, err := fs.ReadFile(tarfs, "index.json")
|
||||
assert.NilError(t, err, "failed to read index.json")
|
||||
|
||||
var index ocispec.Index
|
||||
assert.NilError(t, json.Unmarshal(indexData, &index), "failed to unmarshal index.json")
|
||||
|
||||
// All test images are single-platform, so they should have only one manifest.
|
||||
assert.Assert(t, is.Len(index.Manifests, 1))
|
||||
|
||||
manifestData, err := fs.ReadFile(tarfs, "blobs/sha256/"+index.Manifests[0].Digest.Encoded())
|
||||
assert.NilError(t, err)
|
||||
|
||||
var manifest ocispec.Manifest
|
||||
assert.NilError(t, json.Unmarshal(manifestData, &manifest))
|
||||
|
||||
t.Run("Manifest", func(t *testing.T) {
|
||||
assert.Check(t, is.Len(manifest.Layers, len(inspect.RootFS.Layers)))
|
||||
|
||||
var digests []string
|
||||
// Check if layers referenced by the manifest exist in the archive
|
||||
// and match the layers from the original image.
|
||||
for _, l := range manifest.Layers {
|
||||
layerPath := "blobs/sha256/" + l.Digest.Encoded()
|
||||
stat, err := fs.Stat(tarfs, layerPath)
|
||||
assert.NilError(t, err)
|
||||
|
||||
assert.Check(t, is.Equal(l.Size, stat.Size()))
|
||||
|
||||
f, err := tarfs.Open(layerPath)
|
||||
assert.NilError(t, err)
|
||||
|
||||
layerDigest, err := testutils.UncompressedTarDigest(f)
|
||||
f.Close()
|
||||
|
||||
assert.NilError(t, err)
|
||||
|
||||
digests = append(digests, layerDigest.String())
|
||||
}
|
||||
|
||||
assert.Check(t, is.DeepEqual(digests, inspect.RootFS.Layers))
|
||||
})
|
||||
|
||||
t.Run("Config", func(t *testing.T) {
|
||||
configData, err := fs.ReadFile(tarfs, "blobs/sha256/"+manifest.Config.Digest.Encoded())
|
||||
assert.NilError(t, err)
|
||||
|
||||
var config ocispec.Image
|
||||
assert.NilError(t, json.Unmarshal(configData, &config))
|
||||
|
||||
var diffIDs []string
|
||||
for _, l := range config.RootFS.DiffIDs {
|
||||
diffIDs = append(diffIDs, l.String())
|
||||
}
|
||||
|
||||
assert.Check(t, is.DeepEqual(diffIDs, inspect.RootFS.Layers))
|
||||
})
|
||||
|
||||
t.Run("Containerd image name", func(t *testing.T) {
|
||||
assert.Check(t, is.Equal(index.Manifests[0].Annotations["io.containerd.image.name"], tc.expectedContainerdRef))
|
||||
})
|
||||
|
||||
t.Run("OCI reference tag", func(t *testing.T) {
|
||||
assert.Check(t, is.Equal(index.Manifests[0].Annotations["org.opencontainers.image.ref.name"], tc.expectedOCIRef))
|
||||
})
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
|
24
internal/testutils/archive.go
Normal file
24
internal/testutils/archive.go
Normal file
|
@ -0,0 +1,24 @@
|
|||
package testutils
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// UncompressedTarDigest returns the canonical digest of the uncompressed tar stream.
|
||||
func UncompressedTarDigest(compressedTar io.Reader) (digest.Digest, error) {
|
||||
rd, err := archive.DecompressStream(compressedTar)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
defer rd.Close()
|
||||
|
||||
digester := digest.Canonical.Digester()
|
||||
if _, err := io.Copy(digester.Hash(), rd); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return digester.Digest(), nil
|
||||
}
|
|
@ -1,6 +1,7 @@
|
|||
package specialimage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
@ -41,7 +42,10 @@ func Load(ctx context.Context, t *testing.T, apiClient client.APIClient, imageFu
|
|||
t.Fatalf("Failed load: %s", string(respBody))
|
||||
}
|
||||
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
all, err := io.ReadAll(resp.Body)
|
||||
assert.NilError(t, err)
|
||||
|
||||
decoder := json.NewDecoder(bytes.NewReader(all))
|
||||
for {
|
||||
var msg jsonmessage.JSONMessage
|
||||
err := decoder.Decode(&msg)
|
||||
|
@ -61,6 +65,6 @@ func Load(ctx context.Context, t *testing.T, apiClient client.APIClient, imageFu
|
|||
}
|
||||
}
|
||||
|
||||
t.Fatal("failed to read image ID")
|
||||
t.Fatalf("failed to read image ID\n%s", string(all))
|
||||
return ""
|
||||
}
|
||||
|
|
201
internal/testutils/specialimage/multilayer.go
Normal file
201
internal/testutils/specialimage/multilayer.go
Normal file
|
@ -0,0 +1,201 @@
|
|||
package specialimage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/google/uuid"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
func MultiLayer(dir string) error {
|
||||
const imageRef = "multilayer:latest"
|
||||
|
||||
layer1Desc, err := writeLayerWithOneFile(dir, "foo", []byte("1"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
layer2Desc, err := writeLayerWithOneFile(dir, "bar", []byte("2"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
layer3Desc, err := writeLayerWithOneFile(dir, "hello", []byte("world"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
configDesc, err := writeJsonBlob(dir, ocispec.MediaTypeImageConfig, ocispec.Image{
|
||||
Platform: platforms.DefaultSpec(),
|
||||
Config: ocispec.ImageConfig{
|
||||
Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
|
||||
},
|
||||
RootFS: ocispec.RootFS{
|
||||
Type: "layers",
|
||||
DiffIDs: []digest.Digest{layer1Desc.Digest, layer2Desc.Digest, layer3Desc.Digest},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
manifest := ocispec.Manifest{
|
||||
MediaType: ocispec.MediaTypeImageManifest,
|
||||
Config: configDesc,
|
||||
Layers: []ocispec.Descriptor{layer1Desc, layer2Desc, layer3Desc},
|
||||
}
|
||||
|
||||
legacyManifests := []manifestItem{
|
||||
manifestItem{
|
||||
Config: blobPath(configDesc),
|
||||
RepoTags: []string{imageRef},
|
||||
Layers: []string{blobPath(layer1Desc), blobPath(layer2Desc), blobPath(layer3Desc)},
|
||||
},
|
||||
}
|
||||
|
||||
ref, err := reference.ParseNormalizedNamed(imageRef)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return singlePlatformImage(dir, ref, manifest, legacyManifests)
|
||||
}
|
||||
|
||||
// manifestItem is one entry of the legacy Docker image archive manifest
// (written to manifest.json alongside the OCI layout) so older loaders can
// import the archive.
type manifestItem struct {
	Config   string   // layout-relative path of the image config blob
	RepoTags []string // tag references for the image, e.g. "multilayer:latest"
	Layers   []string // layout-relative paths of the layer blobs, in order
}
|
||||
|
||||
func singlePlatformImage(dir string, ref reference.Named, manifest ocispec.Manifest, legacyManifests []manifestItem) error {
|
||||
manifestDesc, err := writeJsonBlob(dir, ocispec.MediaTypeImageManifest, manifest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ref != nil {
|
||||
manifestDesc.Annotations = map[string]string{
|
||||
"io.containerd.image.name": ref.String(),
|
||||
}
|
||||
|
||||
if tagged, ok := ref.(reference.Tagged); ok {
|
||||
manifestDesc.Annotations[ocispec.AnnotationRefName] = tagged.Tag()
|
||||
}
|
||||
}
|
||||
|
||||
if err := writeJson(ocispec.Index{
|
||||
Versioned: specs.Versioned{SchemaVersion: 2},
|
||||
MediaType: ocispec.MediaTypeImageIndex,
|
||||
Manifests: []ocispec.Descriptor{manifestDesc},
|
||||
}, filepath.Join(dir, "index.json")); err != nil {
|
||||
return err
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := writeJson(legacyManifests, filepath.Join(dir, "manifest.json")); err != nil {
|
||||
return err
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.WriteFile(filepath.Join(dir, "oci-layout"), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0o644)
|
||||
}
|
||||
|
||||
func fileArchive(dir string, name string, content []byte) (io.ReadCloser, error) {
|
||||
tmp, err := os.MkdirTemp("", "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := os.WriteFile(filepath.Join(tmp, name), content, 0o644); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return archive.Tar(tmp, archive.Uncompressed)
|
||||
}
|
||||
|
||||
func writeLayerWithOneFile(dir string, filename string, content []byte) (ocispec.Descriptor, error) {
|
||||
rd, err := fileArchive(dir, filename, content)
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
return writeBlob(dir, ocispec.MediaTypeImageLayer, rd)
|
||||
}
|
||||
|
||||
func writeJsonBlob(dir string, mt string, obj any) (ocispec.Descriptor, error) {
|
||||
b, err := json.Marshal(obj)
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
return writeBlob(dir, mt, bytes.NewReader(b))
|
||||
}
|
||||
|
||||
func writeJson(obj any, path string) error {
|
||||
b, err := json.Marshal(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.WriteFile(path, b, 0o644)
|
||||
}
|
||||
|
||||
func writeBlob(dir string, mt string, rd io.Reader) (_ ocispec.Descriptor, outErr error) {
|
||||
digester := digest.Canonical.Digester()
|
||||
hashTee := io.TeeReader(rd, digester.Hash())
|
||||
|
||||
blobsPath := filepath.Join(dir, "blobs", "sha256")
|
||||
if err := os.MkdirAll(blobsPath, 0o755); err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
tmpPath := filepath.Join(blobsPath, uuid.New().String())
|
||||
file, err := os.Create(tmpPath)
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if outErr != nil {
|
||||
file.Close()
|
||||
os.Remove(tmpPath)
|
||||
}
|
||||
}()
|
||||
|
||||
if _, err := io.Copy(file, hashTee); err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
digest := digester.Digest()
|
||||
|
||||
stat, err := os.Stat(tmpPath)
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
file.Close()
|
||||
if err := os.Rename(tmpPath, filepath.Join(blobsPath, digest.Encoded())); err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
return ocispec.Descriptor{
|
||||
MediaType: mt,
|
||||
Digest: digest,
|
||||
Size: stat.Size(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// blobPath returns the path of desc's blob relative to the root of an OCI
// image layout. Forward slashes are intentional: these paths are used inside
// tar archives and JSON manifests, not on the host filesystem.
func blobPath(desc ocispec.Descriptor) string {
	return "blobs/sha256/" + desc.Digest.Encoded()
}
|
Loading…
Reference in a new issue