Merge pull request #44617 from thaJeztah/23.0_backport_images_errdefs

[23.0 backport] image/store: Use errdefs for errors
Sebastiaan van Stijn 2022-12-09 21:31:25 +01:00 committed by GitHub
commit 47069ae18c
17 changed files with 128 additions and 115 deletions
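
In short: the image store now wraps the errors it returns with github.com/docker/docker/errdefs so that callers can classify them (not found, invalid parameter) instead of matching on error strings; the backport also picks up the rename of go-digest's deprecated Digest.Hex() to Digest.Encoded(), and the tests alias gotest.tools' assert/cmp package as `is` to free the `is` identifier for other use. A minimal sketch of the errdefs pattern, using only helpers that appear in this diff (the message text is illustrative):

package main

import (
    "errors"
    "fmt"

    "github.com/docker/docker/errdefs"
)

func main() {
    // Wrap a plain error with a class that callers can test for.
    err := errdefs.NotFound(errors.New("unrecognized image ID"))

    // Callers branch on the class, not the message, e.g. to pick an
    // HTTP status code in the API layer.
    switch {
    case errdefs.IsNotFound(err):
        fmt.Println("404:", err)
    case errdefs.IsInvalidParameter(err):
        fmt.Println("400:", err)
    default:
        fmt.Println("500:", err)
    }
}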

View file

@ -109,7 +109,7 @@ func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.F
}
hfi := &fileInfo{
-sum: h.Digest().Hex(),
+sum: h.Digest().Encoded(),
}
cs.txn.Insert([]byte(p), hfi)
cs.mu.Unlock()
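
The recurring Hex()→Encoded() substitution here and throughout the diff is purely mechanical: go-digest deprecated Digest.Hex() in favor of the equivalent Digest.Encoded(). A small sketch of the round-trip the stores rely on:

package main

import (
    "fmt"

    "github.com/opencontainers/go-digest"
)

func main() {
    dgst := digest.FromBytes([]byte("foobar"))

    // Encoded() returns the part after the "sha256:" prefix; it is the
    // drop-in replacement for the deprecated Hex().
    fmt.Println(dgst.Algorithm()) // sha256
    fmt.Println(dgst.Encoded())   // c3ab8ff13720e8ad9047dd39466b3c89...

    // Rebuild a digest from a stored directory name and validate it,
    // as the fs store's Walk does below.
    back := digest.NewDigestFromEncoded(digest.Canonical, dgst.Encoded())
    fmt.Println(back.Validate() == nil) // true
}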

View file

@ -127,7 +127,7 @@ deleteImagesLoop:
}
}
} else {
-hex := id.Digest().Hex()
+hex := id.Digest().Encoded()
imgDel, err := i.ImageDelete(hex, false, true)
if imageDeleteFailed(hex, err) {
continue

View file

@ -17,7 +17,7 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"go.etcd.io/bbolt"
"gotest.tools/v3/assert"
"gotest.tools/v3/assert/cmp"
is "gotest.tools/v3/assert/cmp"
)
func setupTestStores(t *testing.T) (context.Context, content.Store, *imageStoreWithLease, func(t *testing.T)) {
@ -75,24 +75,24 @@ func TestImageDelete(t *testing.T) {
ls, err := images.leases.List(ctx)
assert.NilError(t, err)
-assert.Check(t, cmp.Equal(len(ls), 1), ls)
+assert.Check(t, is.Equal(len(ls), 1), ls)
_, err = images.Delete(id)
assert.NilError(t, err)
ls, err = images.leases.List(ctx)
assert.NilError(t, err)
-assert.Check(t, cmp.Equal(len(ls), 0), ls)
+assert.Check(t, is.Equal(len(ls), 0), ls)
})
}
func TestContentStoreForPull(t *testing.T) {
-ctx, cs, is, cleanup := setupTestStores(t)
+ctx, cs, imgStore, cleanup := setupTestStores(t)
defer cleanup(t)
csP := &contentStoreForPull{
ContentStore: cs,
-leases: is.leases,
+leases: imgStore.leases,
}
data := []byte(`{}`)
@ -112,12 +112,12 @@ func TestContentStoreForPull(t *testing.T) {
assert.NilError(t, err)
assert.Equal(t, len(csP.digested), 1)
-assert.Check(t, cmp.Equal(csP.digested[0], desc.Digest))
+assert.Check(t, is.Equal(csP.digested[0], desc.Digest))
// Test already exists
csP.digested = nil
_, err = csP.Writer(ctx, content.WithRef(t.Name()), content.WithDescriptor(desc))
assert.Check(t, c8derrdefs.IsAlreadyExists(err))
assert.Equal(t, len(csP.digested), 1)
-assert.Check(t, cmp.Equal(csP.digested[0], desc.Digest))
+assert.Check(t, is.Equal(csP.digested[0], desc.Digest))
}
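
Most of the test-only churn in this file follows from one rename: the local variable `is` (the image store) becomes `imgStore`, and gotest.tools' assert/cmp package is imported under its customary `is` alias instead of `cmp`. A sketch of the resulting style (names and values are illustrative):

package example_test

import (
    "testing"

    "gotest.tools/v3/assert"
    is "gotest.tools/v3/assert/cmp"
)

func TestAliasStyle(t *testing.T) {
    got := []string{"a", "b"}

    // assert.Check records a failure but lets the test keep running;
    // with the alias, comparisons read as "check that got is ...".
    assert.Check(t, is.Len(got, 2))
    assert.Check(t, is.Equal(got[0], "a"))
    assert.Check(t, is.DeepEqual(got, []string{"a", "b"}))
}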

View file

@ -1,16 +1,14 @@
package cache
import (
"bytes"
"context"
"testing"
"time"
"bytes"
"github.com/docker/docker/daemon/logger"
"gotest.tools/v3/assert"
"gotest.tools/v3/assert/cmp"
is "gotest.tools/v3/assert/cmp"
)
type fakeLogger struct {
@ -75,7 +73,7 @@ func TestLog(t *testing.T) {
case <-ctx.Done():
t.Fatal("timed out waiting for messages... this is probably a test implementation error")
case msg = <-cacher.messages:
-assert.Assert(t, cmp.DeepEqual(msg, m))
+assert.Assert(t, is.DeepEqual(msg, m))
}
}
}

View file

@ -14,7 +14,7 @@ import (
"github.com/pkg/errors"
"gotest.tools/v3/assert"
"gotest.tools/v3/assert/cmp"
is "gotest.tools/v3/assert/cmp"
)
func TestSharedTempFileConverter(t *testing.T) {
@ -33,9 +33,9 @@ func TestSharedTempFileConverter(t *testing.T) {
t.Logf("Iteration %v", i)
rdr := convertPath(t, uut, name)
-assert.Check(t, cmp.Equal("HELLO, WORLD!", readAll(t, rdr)))
+assert.Check(t, is.Equal("HELLO, WORLD!", readAll(t, rdr)))
assert.Check(t, rdr.Close())
-assert.Check(t, cmp.Equal(fs.ErrClosed, rdr.Close()), "closing an already-closed reader should return an error")
+assert.Check(t, is.Equal(fs.ErrClosed, rdr.Close()), "closing an already-closed reader should return an error")
}
assert.NilError(t, os.Remove(name))
@ -67,15 +67,15 @@ func TestSharedTempFileConverter(t *testing.T) {
rb1 := convertPath(t, uut, bpath) // Same path, different file.
ra2 := convertPath(t, uut, apath) // New path, old file.
-assert.Check(t, cmp.Equal(2, conversions), "expected only one conversion per unique file")
+assert.Check(t, is.Equal(2, conversions), "expected only one conversion per unique file")
// Interleave reading and closing to shake out ref-counting bugs:
// closing one reader shouldn't affect any other open readers.
-assert.Check(t, cmp.Equal("FILE A", readAll(t, ra1)))
+assert.Check(t, is.Equal("FILE A", readAll(t, ra1)))
assert.NilError(t, ra1.Close())
-assert.Check(t, cmp.Equal("FILE A", readAll(t, ra2)))
+assert.Check(t, is.Equal("FILE A", readAll(t, ra2)))
assert.NilError(t, ra2.Close())
-assert.Check(t, cmp.Equal("FILE B", readAll(t, rb1)))
+assert.Check(t, is.Equal("FILE B", readAll(t, rb1)))
assert.NilError(t, rb1.Close())
assert.NilError(t, os.Remove(apath))
@ -120,7 +120,7 @@ func TestSharedTempFileConverter(t *testing.T) {
t.Logf("goroutine %v: enter", i)
defer t.Logf("goroutine %v: exit", i)
f := convertPath(t, uut, name)
-assert.Check(t, cmp.Equal("HI THERE", readAll(t, f)), "in goroutine %v", i)
+assert.Check(t, is.Equal("HI THERE", readAll(t, f)), "in goroutine %v", i)
closers <- f
}()
}
@ -138,12 +138,12 @@ func TestSharedTempFileConverter(t *testing.T) {
f := convertPath(t, uut, name)
closers <- f
close(closers)
-assert.Check(t, cmp.Equal("HI THERE", readAll(t, f)), "after all goroutines returned")
+assert.Check(t, is.Equal("HI THERE", readAll(t, f)), "after all goroutines returned")
for c := range closers {
assert.Check(t, c.Close())
}
-assert.Check(t, cmp.Equal(int32(1), conversions))
+assert.Check(t, is.Equal(int32(1), conversions))
assert.NilError(t, os.Remove(name))
checkDirEmpty(t, dir)
@ -197,7 +197,7 @@ func TestSharedTempFileConverter(t *testing.T) {
fakeErr = nil
f, err := uut.Do(src)
assert.Check(t, err)
-assert.Check(t, cmp.Equal("HI THERE", readAll(t, f)))
+assert.Check(t, is.Equal("HI THERE", readAll(t, f)))
assert.Check(t, f.Close())
// Files pending delete continue to show up in directory
@ -240,7 +240,7 @@ func checkDirEmpty(t *testing.T, path string) {
t.Helper()
ls, err := os.ReadDir(path)
assert.NilError(t, err)
-assert.Check(t, cmp.Len(ls, 0), "directory should be free of temp files")
+assert.Check(t, is.Len(ls, 0), "directory should be free of temp files")
}
func copyTransform(f func(string) string) func(dst io.WriteSeeker, src io.ReadSeeker) error {

View file

@ -117,11 +117,11 @@ func (serv *v2MetadataService) digestNamespace() string {
}
func (serv *v2MetadataService) diffIDKey(diffID layer.DiffID) string {
-return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
+return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Encoded()
}
func (serv *v2MetadataService) digestKey(dgst digest.Digest) string {
-return string(dgst.Algorithm()) + "/" + dgst.Hex()
+return string(dgst.Algorithm()) + "/" + dgst.Encoded()
}
// GetMetadata finds the metadata associated with a layer DiffID.

View file

@ -56,11 +56,11 @@ func newFSStore(root string) (*fs, error) {
}
func (s *fs) contentFile(dgst digest.Digest) string {
-return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex())
+return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Encoded())
}
func (s *fs) metadataDir(dgst digest.Digest) string {
-return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex())
+return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Encoded())
}
// Walk calls the supplied callback for each image ID in the storage backend.
@ -73,7 +73,7 @@ func (s *fs) Walk(f DigestWalkFunc) error {
return err
}
for _, v := range dir {
-dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name())
+dgst := digest.NewDigestFromEncoded(digest.Canonical, v.Name())
if err := dgst.Validate(); err != nil {
logrus.Debugf("skipping invalid digest %s: %s", dgst, err)
continue

View file

@ -31,7 +31,7 @@ func TestFSGetInvalidData(t *testing.T) {
dgst, err := store.Set([]byte("foobar"))
assert.Check(t, err)
-err = os.WriteFile(filepath.Join(store.(*fs).root, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600)
+err = os.WriteFile(filepath.Join(store.(*fs).root, contentDirName, string(dgst.Algorithm()), dgst.Encoded()), []byte("foobar2"), 0o600)
assert.Check(t, err)
_, err = store.Get(dgst)
@ -43,7 +43,7 @@ func TestFSInvalidSet(t *testing.T) {
defer cleanup()
id := digest.FromBytes([]byte("foobar"))
-err := os.Mkdir(filepath.Join(store.(*fs).root, contentDirName, string(id.Algorithm()), id.Hex()), 0700)
+err := os.Mkdir(filepath.Join(store.(*fs).root, contentDirName, string(id.Algorithm()), id.Encoded()), 0o700)
assert.Check(t, err)
_, err = store.Set([]byte("foobar"))
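
A second mechanical change rides along in these hunks: file-mode literals move from the bare 0600/0700 form to Go 1.13's explicit 0o600/0o700 octal notation. The value is unchanged, only the spelling:

package main

import "fmt"

func main() {
    // The 0o prefix makes the octal base explicit; both spellings
    // denote the same number.
    fmt.Println(0600 == 0o600, 0700 == 0o700) // true true
}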

View file

@ -5,6 +5,7 @@ import (
"sync"
"time"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/system"
"github.com/opencontainers/go-digest"
@ -137,12 +138,12 @@ func (is *store) Create(config []byte) (ID, error) {
}
}
if layerCounter > len(img.RootFS.DiffIDs) {
-return "", errors.New("too many non-empty layers in History section")
+return "", errdefs.InvalidParameter(errors.New("too many non-empty layers in History section"))
}
dgst, err := is.fs.Set(config)
if err != nil {
-return "", err
+return "", errdefs.InvalidParameter(err)
}
imageID := IDFromDigest(dgst)
@ -158,11 +159,11 @@ func (is *store) Create(config []byte) (ID, error) {
var l layer.Layer
if layerID != "" {
if !system.IsOSSupported(img.OperatingSystem()) {
-return "", system.ErrNotSupportedOperatingSystem
+return "", errdefs.InvalidParameter(system.ErrNotSupportedOperatingSystem)
}
l, err = is.lss.Get(layerID)
if err != nil {
-return "", errors.Wrapf(err, "failed to get layer %s", layerID)
+return "", errdefs.InvalidParameter(errors.Wrapf(err, "failed to get layer %s", layerID))
}
}
@ -174,7 +175,7 @@ func (is *store) Create(config []byte) (ID, error) {
is.images[imageID] = imageMeta
if err := is.digestSet.Add(imageID.Digest()); err != nil {
delete(is.images, imageID)
-return "", err
+return "", errdefs.InvalidParameter(err)
}
return imageID, nil
@ -204,12 +205,12 @@ func (is *store) Get(id ID) (*Image, error) {
// todo: Detect manual insertions and start using them
config, err := is.fs.Get(id.Digest())
if err != nil {
-return nil, err
+return nil, errdefs.NotFound(err)
}
img, err := NewFromJSON(config)
if err != nil {
-return nil, err
+return nil, errdefs.InvalidParameter(err)
}
img.computedID = id
@ -227,11 +228,11 @@ func (is *store) Delete(id ID) ([]layer.Metadata, error) {
imageMeta := is.images[id]
if imageMeta == nil {
-return nil, fmt.Errorf("unrecognized image ID %s", id.String())
+return nil, errdefs.NotFound(fmt.Errorf("unrecognized image ID %s", id.String()))
}
_, err := is.Get(id)
if err != nil {
-return nil, fmt.Errorf("unrecognized image %s, %v", id.String(), err)
+return nil, errdefs.NotFound(fmt.Errorf("unrecognized image %s, %v", id.String(), err))
}
for id := range imageMeta.children {
is.fs.DeleteMetadata(id.Digest(), "parent")
@ -257,7 +258,7 @@ func (is *store) SetParent(id, parent ID) error {
defer is.Unlock()
parentMeta := is.images[parent]
if parentMeta == nil {
-return fmt.Errorf("unknown parent image ID %s", parent.String())
+return errdefs.NotFound(fmt.Errorf("unknown parent image ID %s", parent.String()))
}
if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil {
delete(is.images[parent].children, id)
@ -269,7 +270,7 @@ func (is *store) SetParent(id, parent ID) error {
func (is *store) GetParent(id ID) (ID, error) {
d, err := is.fs.GetMetadata(id.Digest(), "parent")
if err != nil {
-return "", err
+return "", errdefs.NotFound(err)
}
return ID(d), nil // todo: validate?
}
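
Because the store now returns classified errors, the tests below can assert on the error class instead of brittle message matching; gotest.tools' cmp.ErrorType accepts a func(error) bool predicate, so the errdefs helpers plug in directly. A minimal sketch (the message text is illustrative):

package example_test

import (
    "errors"
    "testing"

    "github.com/docker/docker/errdefs"
    "gotest.tools/v3/assert"
    is "gotest.tools/v3/assert/cmp"
)

func TestErrorClass(t *testing.T) {
    err := errdefs.NotFound(errors.New("no such image"))

    // Assert on the class first, then (optionally) on the text.
    assert.Check(t, is.ErrorType(err, errdefs.IsNotFound))
    assert.Check(t, is.ErrorContains(err, "no such image"))
}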

View file

@ -4,17 +4,18 @@ import (
"fmt"
"testing"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/layer"
"gotest.tools/v3/assert"
"gotest.tools/v3/assert/cmp"
is "gotest.tools/v3/assert/cmp"
)
func TestCreate(t *testing.T) {
-is, cleanup := defaultImageStore(t)
+imgStore, cleanup := defaultImageStore(t)
defer cleanup()
-_, err := is.Create([]byte(`{}`))
-assert.Check(t, cmp.Error(err, "invalid image JSON, no RootFS key"))
+_, err := imgStore.Create([]byte(`{}`))
+assert.Check(t, is.Error(err, "invalid image JSON, no RootFS key"))
}
func TestRestore(t *testing.T) {
@ -33,118 +34,131 @@ func TestRestore(t *testing.T) {
err = fs.SetMetadata(id2, "parent", []byte(id1))
assert.NilError(t, err)
-is, err := NewImageStore(fs, &mockLayerGetReleaser{})
+imgStore, err := NewImageStore(fs, &mockLayerGetReleaser{})
assert.NilError(t, err)
-assert.Check(t, cmp.Len(is.Map(), 2))
+assert.Check(t, is.Len(imgStore.Map(), 2))
-img1, err := is.Get(ID(id1))
+img1, err := imgStore.Get(ID(id1))
assert.NilError(t, err)
-assert.Check(t, cmp.Equal(ID(id1), img1.computedID))
-assert.Check(t, cmp.Equal(string(id1), img1.computedID.String()))
+assert.Check(t, is.Equal(ID(id1), img1.computedID))
+assert.Check(t, is.Equal(string(id1), img1.computedID.String()))
-img2, err := is.Get(ID(id2))
+img2, err := imgStore.Get(ID(id2))
assert.NilError(t, err)
-assert.Check(t, cmp.Equal("abc", img1.Comment))
-assert.Check(t, cmp.Equal("def", img2.Comment))
+assert.Check(t, is.Equal("abc", img1.Comment))
+assert.Check(t, is.Equal("def", img2.Comment))
-_, err = is.GetParent(ID(id1))
+_, err = imgStore.GetParent(ID(id1))
+assert.Check(t, is.ErrorType(err, errdefs.IsNotFound))
assert.ErrorContains(t, err, "failed to read metadata")
-p, err := is.GetParent(ID(id2))
+p, err := imgStore.GetParent(ID(id2))
assert.NilError(t, err)
-assert.Check(t, cmp.Equal(ID(id1), p))
+assert.Check(t, is.Equal(ID(id1), p))
-children := is.Children(ID(id1))
-assert.Check(t, cmp.Len(children, 1))
-assert.Check(t, cmp.Equal(ID(id2), children[0]))
-assert.Check(t, cmp.Len(is.Heads(), 1))
+children := imgStore.Children(ID(id1))
+assert.Check(t, is.Len(children, 1))
+assert.Check(t, is.Equal(ID(id2), children[0]))
+assert.Check(t, is.Len(imgStore.Heads(), 1))
-sid1, err := is.Search(string(id1)[:10])
+sid1, err := imgStore.Search(string(id1)[:10])
assert.NilError(t, err)
-assert.Check(t, cmp.Equal(ID(id1), sid1))
+assert.Check(t, is.Equal(ID(id1), sid1))
-sid1, err = is.Search(id1.Hex()[:6])
+sid1, err = imgStore.Search(id1.Encoded()[:6])
assert.NilError(t, err)
-assert.Check(t, cmp.Equal(ID(id1), sid1))
+assert.Check(t, is.Equal(ID(id1), sid1))
-invalidPattern := id1.Hex()[1:6]
-_, err = is.Search(invalidPattern)
-assert.ErrorContains(t, err, "No such image")
+invalidPattern := id1.Encoded()[1:6]
+_, err = imgStore.Search(invalidPattern)
+assert.Check(t, is.ErrorType(err, errdefs.IsNotFound))
+assert.Check(t, is.ErrorContains(err, invalidPattern))
}
func TestAddDelete(t *testing.T) {
-is, cleanup := defaultImageStore(t)
+imgStore, cleanup := defaultImageStore(t)
defer cleanup()
-id1, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`))
+id1, err := imgStore.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`))
assert.NilError(t, err)
-assert.Check(t, cmp.Equal(ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"), id1))
+assert.Check(t, is.Equal(ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"), id1))
-img, err := is.Get(id1)
+img, err := imgStore.Get(id1)
assert.NilError(t, err)
-assert.Check(t, cmp.Equal("abc", img.Comment))
+assert.Check(t, is.Equal("abc", img.Comment))
-id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`))
+id2, err := imgStore.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`))
assert.NilError(t, err)
-err = is.SetParent(id2, id1)
+err = imgStore.SetParent(id2, id1)
assert.NilError(t, err)
-pid1, err := is.GetParent(id2)
+pid1, err := imgStore.GetParent(id2)
assert.NilError(t, err)
-assert.Check(t, cmp.Equal(pid1, id1))
+assert.Check(t, is.Equal(pid1, id1))
-_, err = is.Delete(id1)
+_, err = imgStore.Delete(id1)
assert.NilError(t, err)
-_, err = is.Get(id1)
+_, err = imgStore.Get(id1)
+assert.Check(t, is.ErrorType(err, errdefs.IsNotFound))
assert.ErrorContains(t, err, "failed to get digest")
-_, err = is.Get(id2)
+_, err = imgStore.Get(id2)
assert.NilError(t, err)
-_, err = is.GetParent(id2)
+_, err = imgStore.GetParent(id2)
+assert.Check(t, is.ErrorType(err, errdefs.IsNotFound))
assert.ErrorContains(t, err, "failed to read metadata")
}
func TestSearchAfterDelete(t *testing.T) {
-is, cleanup := defaultImageStore(t)
+imgStore, cleanup := defaultImageStore(t)
defer cleanup()
-id, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`))
+id, err := imgStore.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`))
assert.NilError(t, err)
-id1, err := is.Search(string(id)[:15])
+id1, err := imgStore.Search(string(id)[:15])
assert.NilError(t, err)
-assert.Check(t, cmp.Equal(id1, id))
+assert.Check(t, is.Equal(id1, id))
-_, err = is.Delete(id)
+_, err = imgStore.Delete(id)
assert.NilError(t, err)
-_, err = is.Search(string(id)[:15])
+_, err = imgStore.Search(string(id)[:15])
+assert.Check(t, is.ErrorType(err, errdefs.IsNotFound))
assert.ErrorContains(t, err, "No such image")
}
-func TestParentReset(t *testing.T) {
-is, cleanup := defaultImageStore(t)
+func TestDeleteNotExisting(t *testing.T) {
+imgStore, cleanup := defaultImageStore(t)
defer cleanup()
-id, err := is.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`))
+_, err := imgStore.Delete(ID("i_dont_exists"))
+assert.Check(t, is.ErrorType(err, errdefs.IsNotFound))
+}
+func TestParentReset(t *testing.T) {
+imgStore, cleanup := defaultImageStore(t)
+defer cleanup()
+id, err := imgStore.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`))
assert.NilError(t, err)
-id2, err := is.Create([]byte(`{"comment": "abc2", "rootfs": {"type": "layers"}}`))
+id2, err := imgStore.Create([]byte(`{"comment": "abc2", "rootfs": {"type": "layers"}}`))
assert.NilError(t, err)
-id3, err := is.Create([]byte(`{"comment": "abc3", "rootfs": {"type": "layers"}}`))
+id3, err := imgStore.Create([]byte(`{"comment": "abc3", "rootfs": {"type": "layers"}}`))
assert.NilError(t, err)
-assert.Check(t, is.SetParent(id, id2))
-assert.Check(t, cmp.Len(is.Children(id2), 1))
+assert.Check(t, imgStore.SetParent(id, id2))
+assert.Check(t, is.Len(imgStore.Children(id2), 1))
-assert.Check(t, is.SetParent(id, id3))
-assert.Check(t, cmp.Len(is.Children(id2), 0))
-assert.Check(t, cmp.Len(is.Children(id3), 1))
+assert.Check(t, imgStore.SetParent(id, id3))
+assert.Check(t, is.Len(imgStore.Children(id2), 0))
+assert.Check(t, is.Len(imgStore.Children(id3), 1))
}
func defaultImageStore(t *testing.T) (Store, func()) {
@ -165,13 +179,13 @@ func TestGetAndSetLastUpdated(t *testing.T) {
updated, err := store.GetLastUpdated(id)
assert.NilError(t, err)
-assert.Check(t, cmp.Equal(updated.IsZero(), true))
+assert.Check(t, is.Equal(updated.IsZero(), true))
assert.Check(t, store.SetLastUpdated(id))
updated, err = store.GetLastUpdated(id)
assert.NilError(t, err)
-assert.Check(t, cmp.Equal(updated.IsZero(), false))
+assert.Check(t, is.Equal(updated.IsZero(), false))
}
func TestStoreLen(t *testing.T) {

View file

@ -216,7 +216,7 @@ func (s *saveSession) save(outStream io.Writer) error {
}
manifest = append(manifest, manifestItem{
-Config: id.Digest().Hex() + ".json",
+Config: id.Digest().Encoded() + ".json",
RepoTags: repoTags,
Layers: layers,
LayerSources: foreignSrcs,
@ -304,9 +304,9 @@ func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Desc
return nil, err
}
-v1Img.ID = v1ID.Hex()
+v1Img.ID = v1ID.Encoded()
if parent != "" {
-v1Img.Parent = parent.Hex()
+v1Img.Parent = parent.Encoded()
}
v1Img.OS = img.OS
@ -324,8 +324,8 @@ func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Desc
}
}
-configFile := filepath.Join(s.outDir, id.Digest().Hex()+".json")
-if err := os.WriteFile(configFile, img.RawJSON(), 0644); err != nil {
+configFile := filepath.Join(s.outDir, id.Digest().Encoded()+".json")
+if err := os.WriteFile(configFile, img.RawJSON(), 0o644); err != nil {
return nil, err
}
if err := system.Chtimes(configFile, img.Created, img.Created); err != nil {

View file

@ -323,7 +323,7 @@ func (s *DockerRegistrySuite) TestPullManifestList(c *testing.T) {
assert.NilError(c, err, "error marshalling manifest list")
manifestListDigest := digest.FromBytes(manifestListJSON)
-hexDigest := manifestListDigest.Hex()
+hexDigest := manifestListDigest.Encoded()
registryV2Path := s.reg.Path()

View file

@ -123,7 +123,7 @@ func (s *DockerCLISaveLoadSuite) TestSaveCheckTimes(c *testing.T) {
out, err = RunCommandPipelineWithOutput(
exec.Command(dockerBinary, "save", repoName),
exec.Command("tar", "tv"),
-exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Hex())))
+exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Encoded())))
assert.NilError(c, err, "failed to save repo with image ID and 'repositories' file: %s, %v", out, err)
}
@ -258,7 +258,7 @@ func (s *DockerCLISaveLoadSuite) TestSaveRepoWithMultipleImages(c *testing.T) {
// prefixes are not in tar
for i := range expected {
-expected[i] = digest.Digest(expected[i]).Hex()
+expected[i] = digest.Digest(expected[i]).Encoded()
}
sort.Strings(actual)

View file

@ -50,7 +50,7 @@ func newFSMetadataStore(root string) (*fileMetadataStore, error) {
func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string {
dgst := digest.Digest(layer)
-return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex())
+return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Encoded())
}
func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string {
@ -366,7 +366,7 @@ func (fms *fileMetadataStore) List() ([]ChainID, []string, error) {
for _, fi := range fileInfos {
if fi.IsDir() && fi.Name() != "mounts" {
-dgst := digest.NewDigestFromHex(string(algorithm), fi.Name())
+dgst := digest.NewDigestFromEncoded(algorithm, fi.Name())
if err := dgst.Validate(); err != nil {
logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
} else {

View file

@ -397,7 +397,7 @@ func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error {
var dir string
for {
dgst := digest.Digest(layer.chainID)
-tmpID := fmt.Sprintf("%s-%s-removing", dgst.Hex(), stringid.GenerateRandomID())
+tmpID := fmt.Sprintf("%s-%s-removing", dgst.Encoded(), stringid.GenerateRandomID())
dir = filepath.Join(ls.store.root, string(dgst.Algorithm()), tmpID)
err := os.Rename(ls.store.getLayerDirectory(layer.chainID), dir)
if os.IsExist(err) {

View file

@ -718,13 +718,13 @@ func TestTarStreamVerification(t *testing.T) {
id2 := digest.Digest(layer2.ChainID())
// Replace tar data files
-src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Hex(), "tar-split.json.gz"))
+src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Encoded(), "tar-split.json.gz"))
if err != nil {
t.Fatal(err)
}
defer src.Close()
-dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz"))
+dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Encoded(), "tar-split.json.gz"))
if err != nil {
t.Fatal(err)
}

View file

@ -171,7 +171,7 @@ func (r *V2) Close() {
func (r *V2) getBlobFilename(blobDigest digest.Digest) string {
// Split the digest into its algorithm and hex components.
-dgstAlg, dgstHex := blobDigest.Algorithm(), blobDigest.Hex()
+dgstAlg, dgstHex := blobDigest.Algorithm(), blobDigest.Encoded()
// The path to the target blob data looks something like:
// baseDir + "docker/registry/v2/blobs/sha256/a3/a3ed...46d4/data"