Merge pull request #44428 from thaJeztah/fix_usage_of_deprecated_funcs
Remove uses of deprecated go-digest.NewDigestFromHex, go-digest.Digest.Hex
commit 7bbf2fb652
13 changed files with 25 additions and 25 deletions
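
For context, a minimal sketch of the API migration this PR applies (illustrative only, not code taken from the diff): go-digest deprecated Digest.Hex() in favour of Digest.Encoded(), and NewDigestFromHex (which took the algorithm as a plain string) in favour of NewDigestFromEncoded (which takes a digest.Algorithm). The diff also rewrites file-mode literals such as 0600 into the equivalent 0o600 form; the numeric values are unchanged.

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	dgst := digest.FromString("foobar") // sha256:c3ab8ff1…

	// Deprecated accessors removed by this change:
	//   dgst.Hex()
	//   digest.NewDigestFromHex(string(digest.Canonical), encoded)

	// Current equivalents:
	encoded := dgst.Encoded() // hex portion without the "sha256:" prefix
	rebuilt := digest.NewDigestFromEncoded(digest.Canonical, encoded)

	fmt.Println(dgst.Algorithm(), encoded, rebuilt == dgst) // sha256 c3ab8ff1… true
}
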
@@ -109,7 +109,7 @@ func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.F
 	}

 	hfi := &fileInfo{
-		sum: h.Digest().Hex(),
+		sum: h.Digest().Encoded(),
 	}
 	cs.txn.Insert([]byte(p), hfi)
 	cs.mu.Unlock()
@@ -127,7 +127,7 @@ deleteImagesLoop:
 				}
 			}
 		} else {
-			hex := id.Digest().Hex()
+			hex := id.Digest().Encoded()
 			imgDel, err := i.ImageDelete(ctx, hex, false, true)
 			if imageDeleteFailed(hex, err) {
 				continue
@@ -117,11 +117,11 @@ func (serv *v2MetadataService) digestNamespace() string {
 }

 func (serv *v2MetadataService) diffIDKey(diffID layer.DiffID) string {
-	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
+	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Encoded()
 }

 func (serv *v2MetadataService) digestKey(dgst digest.Digest) string {
-	return string(dgst.Algorithm()) + "/" + dgst.Hex()
+	return string(dgst.Algorithm()) + "/" + dgst.Encoded()
 }

 // GetMetadata finds the metadata associated with a layer DiffID.
@@ -56,11 +56,11 @@ func newFSStore(root string) (*fs, error) {
 }

 func (s *fs) contentFile(dgst digest.Digest) string {
-	return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex())
+	return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Encoded())
 }

 func (s *fs) metadataDir(dgst digest.Digest) string {
-	return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex())
+	return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Encoded())
 }

 // Walk calls the supplied callback for each image ID in the storage backend.
@@ -73,7 +73,7 @@ func (s *fs) Walk(f DigestWalkFunc) error {
 		return err
 	}
 	for _, v := range dir {
-		dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name())
+		dgst := digest.NewDigestFromEncoded(digest.Canonical, v.Name())
 		if err := dgst.Validate(); err != nil {
 			logrus.Debugf("skipping invalid digest %s: %s", dgst, err)
 			continue
@@ -31,7 +31,7 @@ func TestFSGetInvalidData(t *testing.T) {
 	dgst, err := store.Set([]byte("foobar"))
 	assert.Check(t, err)

-	err = os.WriteFile(filepath.Join(store.(*fs).root, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600)
+	err = os.WriteFile(filepath.Join(store.(*fs).root, contentDirName, string(dgst.Algorithm()), dgst.Encoded()), []byte("foobar2"), 0o600)
 	assert.Check(t, err)

 	_, err = store.Get(dgst)
@@ -43,7 +43,7 @@ func TestFSInvalidSet(t *testing.T) {
 	defer cleanup()

 	id := digest.FromBytes([]byte("foobar"))
-	err := os.Mkdir(filepath.Join(store.(*fs).root, contentDirName, string(id.Algorithm()), id.Hex()), 0700)
+	err := os.Mkdir(filepath.Join(store.(*fs).root, contentDirName, string(id.Algorithm()), id.Encoded()), 0o700)
 	assert.Check(t, err)

 	_, err = store.Set([]byte("foobar"))
@@ -64,11 +64,11 @@ func TestRestore(t *testing.T) {
 	assert.NilError(t, err)
 	assert.Check(t, is.Equal(ID(id1), sid1))

-	sid1, err = imgStore.Search(id1.Hex()[:6])
+	sid1, err = imgStore.Search(id1.Encoded()[:6])
 	assert.NilError(t, err)
 	assert.Check(t, is.Equal(ID(id1), sid1))

-	invalidPattern := id1.Hex()[1:6]
+	invalidPattern := id1.Encoded()[1:6]
 	_, err = imgStore.Search(invalidPattern)
 	assert.ErrorContains(t, err, "No such image")
 }
@@ -216,7 +216,7 @@ func (s *saveSession) save(outStream io.Writer) error {
 		}

 		manifest = append(manifest, manifestItem{
-			Config:       id.Digest().Hex() + ".json",
+			Config:       id.Digest().Encoded() + ".json",
 			RepoTags:     repoTags,
 			Layers:       layers,
 			LayerSources: foreignSrcs,
@@ -304,9 +304,9 @@ func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Desc
 			return nil, err
 		}

-		v1Img.ID = v1ID.Hex()
+		v1Img.ID = v1ID.Encoded()
 		if parent != "" {
-			v1Img.Parent = parent.Hex()
+			v1Img.Parent = parent.Encoded()
 		}

 		v1Img.OS = img.OS
@@ -324,8 +324,8 @@ func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Desc
 		}
 	}

-	configFile := filepath.Join(s.outDir, id.Digest().Hex()+".json")
-	if err := os.WriteFile(configFile, img.RawJSON(), 0644); err != nil {
+	configFile := filepath.Join(s.outDir, id.Digest().Encoded()+".json")
+	if err := os.WriteFile(configFile, img.RawJSON(), 0o644); err != nil {
 		return nil, err
 	}
 	if err := system.Chtimes(configFile, img.Created, img.Created); err != nil {
@@ -323,7 +323,7 @@ func (s *DockerRegistrySuite) TestPullManifestList(c *testing.T) {
 	assert.NilError(c, err, "error marshalling manifest list")

 	manifestListDigest := digest.FromBytes(manifestListJSON)
-	hexDigest := manifestListDigest.Hex()
+	hexDigest := manifestListDigest.Encoded()

 	registryV2Path := s.reg.Path()

@@ -123,7 +123,7 @@ func (s *DockerCLISaveLoadSuite) TestSaveCheckTimes(c *testing.T) {
 	out, err = RunCommandPipelineWithOutput(
 		exec.Command(dockerBinary, "save", repoName),
 		exec.Command("tar", "tv"),
-		exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Hex())))
+		exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Encoded())))
 	assert.NilError(c, err, "failed to save repo with image ID and 'repositories' file: %s, %v", out, err)
 }

@@ -258,7 +258,7 @@ func (s *DockerCLISaveLoadSuite) TestSaveRepoWithMultipleImages(c *testing.T) {

 	// prefixes are not in tar
 	for i := range expected {
-		expected[i] = digest.Digest(expected[i]).Hex()
+		expected[i] = digest.Digest(expected[i]).Encoded()
 	}

 	sort.Strings(actual)
@@ -49,7 +49,7 @@ func newFSMetadataStore(root string) (*fileMetadataStore, error) {

 func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string {
 	dgst := digest.Digest(layer)
-	return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex())
+	return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Encoded())
 }

 func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string {
@@ -364,7 +364,7 @@ func (fms *fileMetadataStore) List() ([]ChainID, []string, error) {

 		for _, fi := range fileInfos {
 			if fi.IsDir() && fi.Name() != "mounts" {
-				dgst := digest.NewDigestFromHex(string(algorithm), fi.Name())
+				dgst := digest.NewDigestFromEncoded(algorithm, fi.Name())
 				if err := dgst.Validate(); err != nil {
 					logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
 				} else {
@@ -397,7 +397,7 @@ func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error {
 	var dir string
 	for {
 		dgst := digest.Digest(layer.chainID)
-		tmpID := fmt.Sprintf("%s-%s-removing", dgst.Hex(), stringid.GenerateRandomID())
+		tmpID := fmt.Sprintf("%s-%s-removing", dgst.Encoded(), stringid.GenerateRandomID())
 		dir = filepath.Join(ls.store.root, string(dgst.Algorithm()), tmpID)
 		err := os.Rename(ls.store.getLayerDirectory(layer.chainID), dir)
 		if os.IsExist(err) {
@@ -717,13 +717,13 @@ func TestTarStreamVerification(t *testing.T) {
 	id2 := digest.Digest(layer2.ChainID())

 	// Replace tar data files
-	src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Hex(), "tar-split.json.gz"))
+	src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Encoded(), "tar-split.json.gz"))
 	if err != nil {
 		t.Fatal(err)
 	}
 	defer src.Close()

-	dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz"))
+	dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Encoded(), "tar-split.json.gz"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -171,7 +171,7 @@ func (r *V2) Close() {

 func (r *V2) getBlobFilename(blobDigest digest.Digest) string {
 	// Split the digest into its algorithm and hex components.
-	dgstAlg, dgstHex := blobDigest.Algorithm(), blobDigest.Hex()
+	dgstAlg, dgstHex := blobDigest.Algorithm(), blobDigest.Encoded()

 	// The path to the target blob data looks something like:
 	// baseDir + "docker/registry/v2/blobs/sha256/a3/a3ed...46d4/data"