Merge pull request #44428 from thaJeztah/fix_usage_of_deprecated_funcs

Remove uses of deprecated go-digest.NewDigestFromHex, go-digest.Digest.Hex
This commit is contained in:
Sebastiaan van Stijn 2022-11-10 17:52:43 +01:00 committed by GitHub
commit 7bbf2fb652
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
13 changed files with 25 additions and 25 deletions

View file

@@ -109,7 +109,7 @@ func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.F
 	}
 	hfi := &fileInfo{
-		sum: h.Digest().Hex(),
+		sum: h.Digest().Encoded(),
 	}
 	cs.txn.Insert([]byte(p), hfi)
 	cs.mu.Unlock()

View file

@@ -127,7 +127,7 @@ deleteImagesLoop:
 			}
 		}
 	} else {
-		hex := id.Digest().Hex()
+		hex := id.Digest().Encoded()
 		imgDel, err := i.ImageDelete(ctx, hex, false, true)
 		if imageDeleteFailed(hex, err) {
 			continue

View file

@@ -117,11 +117,11 @@ func (serv *v2MetadataService) digestNamespace() string {
 }

 func (serv *v2MetadataService) diffIDKey(diffID layer.DiffID) string {
-	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
+	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Encoded()
 }

 func (serv *v2MetadataService) digestKey(dgst digest.Digest) string {
-	return string(dgst.Algorithm()) + "/" + dgst.Hex()
+	return string(dgst.Algorithm()) + "/" + dgst.Encoded()
 }

 // GetMetadata finds the metadata associated with a layer DiffID.

View file

@@ -56,11 +56,11 @@ func newFSStore(root string) (*fs, error) {
 }

 func (s *fs) contentFile(dgst digest.Digest) string {
-	return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex())
+	return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Encoded())
 }

 func (s *fs) metadataDir(dgst digest.Digest) string {
-	return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex())
+	return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Encoded())
 }

 // Walk calls the supplied callback for each image ID in the storage backend.
@@ -73,7 +73,7 @@ func (s *fs) Walk(f DigestWalkFunc) error {
 		return err
 	}
 	for _, v := range dir {
-		dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name())
+		dgst := digest.NewDigestFromEncoded(digest.Canonical, v.Name())
 		if err := dgst.Validate(); err != nil {
 			logrus.Debugf("skipping invalid digest %s: %s", dgst, err)
 			continue

View file

@@ -31,7 +31,7 @@ func TestFSGetInvalidData(t *testing.T) {
 	dgst, err := store.Set([]byte("foobar"))
 	assert.Check(t, err)

-	err = os.WriteFile(filepath.Join(store.(*fs).root, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600)
+	err = os.WriteFile(filepath.Join(store.(*fs).root, contentDirName, string(dgst.Algorithm()), dgst.Encoded()), []byte("foobar2"), 0o600)
 	assert.Check(t, err)

 	_, err = store.Get(dgst)
@@ -43,7 +43,7 @@ func TestFSInvalidSet(t *testing.T) {
 	defer cleanup()

 	id := digest.FromBytes([]byte("foobar"))
-	err := os.Mkdir(filepath.Join(store.(*fs).root, contentDirName, string(id.Algorithm()), id.Hex()), 0700)
+	err := os.Mkdir(filepath.Join(store.(*fs).root, contentDirName, string(id.Algorithm()), id.Encoded()), 0o700)
 	assert.Check(t, err)

 	_, err = store.Set([]byte("foobar"))

View file

@@ -64,11 +64,11 @@ func TestRestore(t *testing.T) {
 	assert.NilError(t, err)
 	assert.Check(t, is.Equal(ID(id1), sid1))

-	sid1, err = imgStore.Search(id1.Hex()[:6])
+	sid1, err = imgStore.Search(id1.Encoded()[:6])
 	assert.NilError(t, err)
 	assert.Check(t, is.Equal(ID(id1), sid1))

-	invalidPattern := id1.Hex()[1:6]
+	invalidPattern := id1.Encoded()[1:6]
 	_, err = imgStore.Search(invalidPattern)
 	assert.ErrorContains(t, err, "No such image")
 }

View file

@@ -216,7 +216,7 @@ func (s *saveSession) save(outStream io.Writer) error {
 		}

 		manifest = append(manifest, manifestItem{
-			Config:       id.Digest().Hex() + ".json",
+			Config:       id.Digest().Encoded() + ".json",
 			RepoTags:     repoTags,
 			Layers:       layers,
 			LayerSources: foreignSrcs,
@@ -304,9 +304,9 @@ func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Desc
 			return nil, err
 		}

-		v1Img.ID = v1ID.Hex()
+		v1Img.ID = v1ID.Encoded()
 		if parent != "" {
-			v1Img.Parent = parent.Hex()
+			v1Img.Parent = parent.Encoded()
 		}

 		v1Img.OS = img.OS
@@ -324,8 +324,8 @@ func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Desc
 		}
 	}

-	configFile := filepath.Join(s.outDir, id.Digest().Hex()+".json")
-	if err := os.WriteFile(configFile, img.RawJSON(), 0644); err != nil {
+	configFile := filepath.Join(s.outDir, id.Digest().Encoded()+".json")
+	if err := os.WriteFile(configFile, img.RawJSON(), 0o644); err != nil {
 		return nil, err
 	}
 	if err := system.Chtimes(configFile, img.Created, img.Created); err != nil {

View file

@@ -323,7 +323,7 @@ func (s *DockerRegistrySuite) TestPullManifestList(c *testing.T) {
 	assert.NilError(c, err, "error marshalling manifest list")

 	manifestListDigest := digest.FromBytes(manifestListJSON)
-	hexDigest := manifestListDigest.Hex()
+	hexDigest := manifestListDigest.Encoded()

 	registryV2Path := s.reg.Path()

View file

@@ -123,7 +123,7 @@ func (s *DockerCLISaveLoadSuite) TestSaveCheckTimes(c *testing.T) {
 	out, err = RunCommandPipelineWithOutput(
 		exec.Command(dockerBinary, "save", repoName),
 		exec.Command("tar", "tv"),
-		exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Hex())))
+		exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Encoded())))
 	assert.NilError(c, err, "failed to save repo with image ID and 'repositories' file: %s, %v", out, err)
 }
@@ -258,7 +258,7 @@ func (s *DockerCLISaveLoadSuite) TestSaveRepoWithMultipleImages(c *testing.T) {
 	// prefixes are not in tar
 	for i := range expected {
-		expected[i] = digest.Digest(expected[i]).Hex()
+		expected[i] = digest.Digest(expected[i]).Encoded()
 	}

 	sort.Strings(actual)

View file

@@ -49,7 +49,7 @@ func newFSMetadataStore(root string) (*fileMetadataStore, error) {
 func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string {
 	dgst := digest.Digest(layer)
-	return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex())
+	return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Encoded())
 }

 func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string {
@@ -364,7 +364,7 @@ func (fms *fileMetadataStore) List() ([]ChainID, []string, error) {
 		for _, fi := range fileInfos {
 			if fi.IsDir() && fi.Name() != "mounts" {
-				dgst := digest.NewDigestFromHex(string(algorithm), fi.Name())
+				dgst := digest.NewDigestFromEncoded(algorithm, fi.Name())
 				if err := dgst.Validate(); err != nil {
 					logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
 				} else {

View file

@@ -397,7 +397,7 @@ func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error {
 	var dir string
 	for {
 		dgst := digest.Digest(layer.chainID)
-		tmpID := fmt.Sprintf("%s-%s-removing", dgst.Hex(), stringid.GenerateRandomID())
+		tmpID := fmt.Sprintf("%s-%s-removing", dgst.Encoded(), stringid.GenerateRandomID())
 		dir = filepath.Join(ls.store.root, string(dgst.Algorithm()), tmpID)
 		err := os.Rename(ls.store.getLayerDirectory(layer.chainID), dir)
 		if os.IsExist(err) {

View file

@@ -717,13 +717,13 @@ func TestTarStreamVerification(t *testing.T) {
 	id2 := digest.Digest(layer2.ChainID())

 	// Replace tar data files
-	src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Hex(), "tar-split.json.gz"))
+	src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Encoded(), "tar-split.json.gz"))
 	if err != nil {
 		t.Fatal(err)
 	}
 	defer src.Close()

-	dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz"))
+	dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Encoded(), "tar-split.json.gz"))
 	if err != nil {
 		t.Fatal(err)
 	}

View file

@@ -171,7 +171,7 @@ func (r *V2) Close() {
 func (r *V2) getBlobFilename(blobDigest digest.Digest) string {
 	// Split the digest into its algorithm and hex components.
-	dgstAlg, dgstHex := blobDigest.Algorithm(), blobDigest.Hex()
+	dgstAlg, dgstHex := blobDigest.Algorithm(), blobDigest.Encoded()

 	// The path to the target blob data looks something like:
 	//   baseDir + "docker/registry/v2/blobs/sha256/a3/a3ed...46d4/data"