Generalize content addressable and reference storage
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
commit 805223982c
parent a7c25f9540
21 changed files with 184 additions and 174 deletions
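The change re-keys the reference store and the image store backend on digest.Digest instead of image.ID, and adds Digest() and IDFromDigest() to image.ID so callers can convert at the boundary between the two. A minimal sketch of those conversions, assuming only the import paths that already appear in this diff (illustrative, not part of the commit):

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
	"github.com/docker/docker/image"
)

func main() {
	// A digest, as now returned by the generalized reference store.
	dgst := digest.Digest("sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")

	// Wrap it when calling image-store APIs that still take image.ID...
	id := image.IDFromDigest(dgst)

	// ...and unwrap it again for the digest-keyed stores.
	fmt.Println(id.Digest() == dgst) // true
}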
@@ -27,18 +27,18 @@ func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) {
 return "", err
 }
 if id != "" {
-if _, err := daemon.imageStore.Get(image.ID(id)); err != nil {
+if _, err := daemon.imageStore.Get(image.IDFromDigest(id)); err != nil {
 return "", ErrImageDoesNotExist{refOrID}
 }
-return image.ID(id), nil
+return image.IDFromDigest(id), nil
 }

 if id, err := daemon.referenceStore.Get(ref); err == nil {
-return id, nil
+return image.IDFromDigest(id), nil
 }
 if tagged, ok := ref.(reference.NamedTagged); ok {
 if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil {
-for _, namedRef := range daemon.referenceStore.References(id) {
+for _, namedRef := range daemon.referenceStore.References(id.Digest()) {
 if namedRef.Name() == ref.Name() {
 return id, nil
 }

@@ -68,7 +68,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 return nil, daemon.imageNotExistToErrcode(err)
 }

-repoRefs := daemon.referenceStore.References(imgID)
+repoRefs := daemon.referenceStore.References(imgID.Digest())

 var removedRepositoryRef bool
 if !isImageIDPrefix(imgID.String(), imageRef) {

@@ -102,7 +102,7 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I
 daemon.LogImageEvent(imgID.String(), imgID.String(), "untag")
 records = append(records, untaggedRecord)

-repoRefs = daemon.referenceStore.References(imgID)
+repoRefs = daemon.referenceStore.References(imgID.Digest())

 // If a tag reference was removed and the only remaining
 // references to the same repository are digest references,

@@ -239,7 +239,7 @@ func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, erro
 // daemon's event service. An "Untagged" types.ImageDelete is added to the
 // given list of records.
 func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDelete) error {
-imageRefs := daemon.referenceStore.References(imgID)
+imageRefs := daemon.referenceStore.References(imgID.Digest())

 for _, imageRef := range imageRefs {
 parsedRef, err := daemon.removeImageRef(imageRef)

@@ -372,7 +372,7 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType
 }

 // Check if any repository tags/digest reference this image.
-if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID)) > 0 {
+if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID.Digest())) > 0 {
 return &imageDeleteConflict{
 imgID: imgID,
 message: "image is referenced in multiple repositories",

@@ -400,5 +400,5 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType
 // that there are no repository references to the given image and it has no
 // child images.
 func (daemon *Daemon) imageIsDangling(imgID image.ID) bool {
-return !(len(daemon.referenceStore.References(imgID)) > 0 || len(daemon.imageStore.Children(imgID)) > 0)
+return !(len(daemon.referenceStore.References(imgID.Digest())) > 0 || len(daemon.imageStore.Children(imgID)) > 0)
 }

@@ -60,7 +60,7 @@ func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) {
 h.ID = id.String()

 var tags []string
-for _, r := range daemon.referenceStore.References(id) {
+for _, r := range daemon.referenceStore.References(id.Digest()) {
 if _, ok := r.(reference.NamedTagged); ok {
 tags = append(tags, r.String())
 }

@@ -17,7 +17,7 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) {
 return nil, fmt.Errorf("No such image: %s", name)
 }

-refs := daemon.referenceStore.References(img.ID())
+refs := daemon.referenceStore.References(img.ID().Digest())
 repoTags := []string{}
 repoDigests := []string{}
 for _, ref := range refs {

@@ -28,7 +28,7 @@ func (daemon *Daemon) TagImage(imageName, repository, tag string) error {

 // TagImageWithReference adds the given reference to the image ID provided.
 func (daemon *Daemon) TagImageWithReference(imageID image.ID, newTag reference.Named) error {
-if err := daemon.referenceStore.AddTag(newTag, imageID, true); err != nil {
+if err := daemon.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil {
 return err
 }

@@ -135,7 +135,7 @@ func (daemon *Daemon) Images(filterArgs, filter string, all bool) ([]*types.Imag

 newImage := newImage(img, size)

-for _, ref := range daemon.referenceStore.References(id) {
+for _, ref := range daemon.referenceStore.References(id.Digest()) {
 if filter != "" { // filter by tag/repo name
 if filterTagged { // filter by tag, require full ref match
 if ref.String() != filter {
@@ -205,21 +205,21 @@ func ValidateRepoName(name string) error {
 return nil
 }

-func addDigestReference(store reference.Store, ref reference.Named, dgst digest.Digest, imageID image.ID) error {
+func addDigestReference(store reference.Store, ref reference.Named, dgst digest.Digest, id digest.Digest) error {
 dgstRef, err := reference.WithDigest(ref, dgst)
 if err != nil {
 return err
 }

-if oldTagImageID, err := store.Get(dgstRef); err == nil {
-if oldTagImageID != imageID {
+if oldTagID, err := store.Get(dgstRef); err == nil {
+if oldTagID != id {
 // Updating digests not supported by reference store
-logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagImageID, imageID)
+logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagID, id)
 }
 return nil
 } else if err != reference.ErrDoesNotExist {
 return err
 }

-return store.AddDigest(dgstRef, imageID, true)
+return store.AddDigest(dgstRef, id, true)
 }

@@ -248,7 +248,7 @@ func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNa
 return err
 }

-if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil {
+if err := p.config.ReferenceStore.AddTag(localNameRef, imageID.Digest(), true); err != nil {
 return err
 }

@@ -374,23 +374,23 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat
 progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name())

 var (
-imageID image.ID
+id digest.Digest
 manifestDigest digest.Digest
 )

 switch v := manifest.(type) {
 case *schema1.SignedManifest:
-imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v)
+id, manifestDigest, err = p.pullSchema1(ctx, ref, v)
 if err != nil {
 return false, err
 }
 case *schema2.DeserializedManifest:
-imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v)
+id, manifestDigest, err = p.pullSchema2(ctx, ref, v)
 if err != nil {
 return false, err
 }
 case *manifestlist.DeserializedManifestList:
-imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v)
+id, manifestDigest, err = p.pullManifestList(ctx, ref, v)
 if err != nil {
 return false, err
 }

@@ -400,31 +400,31 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat

 progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

-oldTagImageID, err := p.config.ReferenceStore.Get(ref)
+oldTagID, err := p.config.ReferenceStore.Get(ref)
 if err == nil {
-if oldTagImageID == imageID {
-return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, imageID)
+if oldTagID == id {
+return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
 }
 } else if err != reference.ErrDoesNotExist {
 return false, err
 }

 if canonical, ok := ref.(reference.Canonical); ok {
-if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
+if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
 return false, err
 }
 } else {
-if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, imageID); err != nil {
+if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
 return false, err
 }
-if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
+if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
 return false, err
 }
 }
 return true, nil
 }

-func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
+func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
 var verifiedManifest *schema1.Manifest
 verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
 if err != nil {

@@ -487,28 +487,27 @@ func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverif
 return "", "", err
 }

-imageID, err = p.config.ImageStore.Create(config)
+imageID, err := p.config.ImageStore.Create(config)
 if err != nil {
 return "", "", err
 }

 manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

-return imageID, manifestDigest, nil
+return imageID.Digest(), manifestDigest, nil
 }

-func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
+func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
 manifestDigest, err = schema2ManifestDigest(ref, mfst)
 if err != nil {
 return "", "", err
 }

 target := mfst.Target()
-imageID = image.ID(target.Digest)
-if _, err := p.config.ImageStore.Get(imageID); err == nil {
+if _, err := p.config.ImageStore.Get(image.IDFromDigest(target.Digest)); err == nil {
 // If the image already exists locally, no need to pull
 // anything.
-return imageID, manifestDigest, nil
+return target.Digest, manifestDigest, nil
 }

 var descriptors []xfer.DownloadDescriptor

@@ -534,7 +533,7 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s

 // Pull the image config
 go func() {
-configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest)
+configJSON, err := p.pullSchema2Config(ctx, target.Digest)
 if err != nil {
 errChan <- ImageConfigPullError{Err: err}
 cancel()

@@ -618,12 +617,12 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s
 }
 }

-imageID, err = p.config.ImageStore.Create(configJSON)
+imageID, err := p.config.ImageStore.Create(configJSON)
 if err != nil {
 return "", "", err
 }

-return imageID, manifestDigest, nil
+return imageID.Digest(), manifestDigest, nil
 }

 func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {

@@ -643,7 +642,7 @@ func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, imag

 // pullManifestList handles "manifest lists" which point to various
 // platform-specifc manifests.
-func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) {
+func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) {
 manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
 if err != nil {
 return "", "", err

@@ -681,12 +680,12 @@ func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mf

 switch v := manifest.(type) {
 case *schema1.SignedManifest:
-imageID, _, err = p.pullSchema1(ctx, manifestRef, v)
+id, _, err = p.pullSchema1(ctx, manifestRef, v)
 if err != nil {
 return "", "", err
 }
 case *schema2.DeserializedManifest:
-imageID, _, err = p.pullSchema2(ctx, manifestRef, v)
+id, _, err = p.pullSchema2(ctx, manifestRef, v)
 if err != nil {
 return "", "", err
 }

@@ -694,10 +693,10 @@ func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mf
 return "", "", errors.New("unsupported manifest format")
 }

-return imageID, manifestListDigest, err
+return id, manifestListDigest, err
 }

-func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
+func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
 blobs := p.repo.Blobs(ctx)
 configJSON, err = blobs.Get(ctx, dgst)
 if err != nil {
@@ -92,7 +92,7 @@ type v1TopImage struct {
 }

 func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) {
-v1ID := digest.Digest(imageID).Hex()
+v1ID := imageID.Digest().Hex()
 parentV1ID := ""
 if parent != nil {
 parentV1ID = parent.V1ID()

@@ -149,10 +149,12 @@ func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID
 if isTagged {
 // Push a specific tag
 var imgID image.ID
-imgID, err = p.config.ReferenceStore.Get(p.ref)
+var dgst digest.Digest
+dgst, err = p.config.ReferenceStore.Get(p.ref)
 if err != nil {
 return
 }
+imgID = image.IDFromDigest(dgst)

 imageList, err = p.imageListForTag(imgID, nil, &referencedLayers)
 if err != nil {

@@ -164,7 +166,7 @@ func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID
 return
 }

-imagesSeen := make(map[image.ID]struct{})
+imagesSeen := make(map[digest.Digest]struct{})
 dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage)

 associations := p.config.ReferenceStore.ReferencesByName(p.ref)

@@ -174,15 +176,16 @@ func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID
 continue
 }

-tagsByImage[association.ImageID] = append(tagsByImage[association.ImageID], tagged.Tag())
+imgID := image.IDFromDigest(association.ID)
+tagsByImage[imgID] = append(tagsByImage[imgID], tagged.Tag())

-if _, present := imagesSeen[association.ImageID]; present {
+if _, present := imagesSeen[association.ID]; present {
 // Skip generating image list for already-seen image
 continue
 }
-imagesSeen[association.ImageID] = struct{}{}
+imagesSeen[association.ID] = struct{}{}

-imageListForThisTag, err := p.imageListForTag(association.ImageID, dependenciesSeen, &referencedLayers)
+imageListForThisTag, err := p.imageListForTag(imgID, dependenciesSeen, &referencedLayers)
 if err != nil {
 return nil, nil, nil, err
 }

@@ -99,7 +99,7 @@ func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
 for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) {
 if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged {
 pushed++
-if err := p.pushV2Tag(ctx, namedTagged, association.ImageID); err != nil {
+if err := p.pushV2Tag(ctx, namedTagged, association.ID); err != nil {
 return err
 }
 }

@@ -112,10 +112,10 @@ func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
 return nil
 }

-func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, imageID image.ID) error {
+func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error {
 logrus.Debugf("Pushing repository: %s", ref.String())

-img, err := p.config.ImageStore.Get(imageID)
+img, err := p.config.ImageStore.Get(image.IDFromDigest(id))
 if err != nil {
 return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err)
 }

@@ -207,7 +207,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, ima
 manifestDigest := digest.FromBytes(canonicalManifest)
 progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest))

-if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, imageID); err != nil {
+if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
 return err
 }

image/fs.go

@@ -12,18 +12,18 @@ import (
 "github.com/docker/docker/pkg/ioutils"
 )

-// IDWalkFunc is function called by StoreBackend.Walk
-type IDWalkFunc func(id ID) error
+// DigestWalkFunc is function called by StoreBackend.Walk
+type DigestWalkFunc func(id digest.Digest) error

 // StoreBackend provides interface for image.Store persistence
 type StoreBackend interface {
-Walk(f IDWalkFunc) error
-Get(id ID) ([]byte, error)
-Set(data []byte) (ID, error)
-Delete(id ID) error
-SetMetadata(id ID, key string, data []byte) error
-GetMetadata(id ID, key string) ([]byte, error)
-DeleteMetadata(id ID, key string) error
+Walk(f DigestWalkFunc) error
+Get(id digest.Digest) ([]byte, error)
+Set(data []byte) (digest.Digest, error)
+Delete(id digest.Digest) error
+SetMetadata(id digest.Digest, key string, data []byte) error
+GetMetadata(id digest.Digest, key string) ([]byte, error)
+DeleteMetadata(id digest.Digest, key string) error
 }

 // fs implements StoreBackend using the filesystem.
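The StoreBackend interface above is keyed purely on digests: Set derives the key from the content itself, and Get re-verifies the content against that key on read. A small sketch of the round trip (illustrative only, assuming it sits in the image package next to a StoreBackend value such as the fs store returned by newFSStore):

// roundTrip is a hypothetical helper, not part of this diff.
func roundTrip(backend StoreBackend, data []byte) (digest.Digest, error) {
	dgst, err := backend.Set(data) // key is digest.FromBytes(data)
	if err != nil {
		return "", err
	}
	if _, err := backend.Get(dgst); err != nil { // content is re-verified against dgst
		return "", err
	}
	return dgst, nil
}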
@@ -55,18 +55,16 @@ func newFSStore(root string) (*fs, error) {
 return s, nil
 }

-func (s *fs) contentFile(id ID) string {
-dgst := digest.Digest(id)
+func (s *fs) contentFile(dgst digest.Digest) string {
 return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex())
 }

-func (s *fs) metadataDir(id ID) string {
-dgst := digest.Digest(id)
+func (s *fs) metadataDir(dgst digest.Digest) string {
 return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex())
 }

 // Walk calls the supplied callback for each image ID in the storage backend.
-func (s *fs) Walk(f IDWalkFunc) error {
+func (s *fs) Walk(f DigestWalkFunc) error {
 // Only Canonical digest (sha256) is currently supported
 s.RLock()
 dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical)))

@@ -80,37 +78,37 @@ func (s *fs) Walk(f IDWalkFunc) error {
 logrus.Debugf("Skipping invalid digest %s: %s", dgst, err)
 continue
 }
-if err := f(ID(dgst)); err != nil {
+if err := f(dgst); err != nil {
 return err
 }
 }
 return nil
 }

-// Get returns the content stored under a given ID.
-func (s *fs) Get(id ID) ([]byte, error) {
+// Get returns the content stored under a given digest.
+func (s *fs) Get(dgst digest.Digest) ([]byte, error) {
 s.RLock()
 defer s.RUnlock()

-return s.get(id)
+return s.get(dgst)
 }

-func (s *fs) get(id ID) ([]byte, error) {
-content, err := ioutil.ReadFile(s.contentFile(id))
+func (s *fs) get(dgst digest.Digest) ([]byte, error) {
+content, err := ioutil.ReadFile(s.contentFile(dgst))
 if err != nil {
 return nil, err
 }

 // todo: maybe optional
-if ID(digest.FromBytes(content)) != id {
-return nil, fmt.Errorf("failed to verify image: %v", id)
+if digest.FromBytes(content) != dgst {
+return nil, fmt.Errorf("failed to verify: %v", dgst)
 }

 return content, nil
 }

-// Set stores content under a given ID.
-func (s *fs) Set(data []byte) (ID, error) {
+// Set stores content by checksum.
+func (s *fs) Set(data []byte) (digest.Digest, error) {
 s.Lock()
 defer s.Unlock()


@@ -118,58 +116,58 @@ func (s *fs) Set(data []byte) (ID, error) {
 return "", fmt.Errorf("Invalid empty data")
 }

-id := ID(digest.FromBytes(data))
-if err := ioutils.AtomicWriteFile(s.contentFile(id), data, 0600); err != nil {
+dgst := digest.FromBytes(data)
+if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil {
 return "", err
 }

-return id, nil
+return dgst, nil
 }

-// Delete removes content and metadata files associated with the ID.
-func (s *fs) Delete(id ID) error {
+// Delete removes content and metadata files associated with the digest.
+func (s *fs) Delete(dgst digest.Digest) error {
 s.Lock()
 defer s.Unlock()

-if err := os.RemoveAll(s.metadataDir(id)); err != nil {
+if err := os.RemoveAll(s.metadataDir(dgst)); err != nil {
 return err
 }
-if err := os.Remove(s.contentFile(id)); err != nil {
+if err := os.Remove(s.contentFile(dgst)); err != nil {
 return err
 }
 return nil
 }

 // SetMetadata sets metadata for a given ID. It fails if there's no base file.
-func (s *fs) SetMetadata(id ID, key string, data []byte) error {
+func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error {
 s.Lock()
 defer s.Unlock()
-if _, err := s.get(id); err != nil {
+if _, err := s.get(dgst); err != nil {
 return err
 }

-baseDir := filepath.Join(s.metadataDir(id))
+baseDir := filepath.Join(s.metadataDir(dgst))
 if err := os.MkdirAll(baseDir, 0700); err != nil {
 return err
 }
-return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(id), key), data, 0600)
+return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600)
 }

-// GetMetadata returns metadata for a given ID.
-func (s *fs) GetMetadata(id ID, key string) ([]byte, error) {
+// GetMetadata returns metadata for a given digest.
+func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) {
 s.RLock()
 defer s.RUnlock()

-if _, err := s.get(id); err != nil {
+if _, err := s.get(dgst); err != nil {
 return nil, err
 }
-return ioutil.ReadFile(filepath.Join(s.metadataDir(id), key))
+return ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key))
 }

-// DeleteMetadata removes the metadata associated with an ID.
-func (s *fs) DeleteMetadata(id ID, key string) error {
+// DeleteMetadata removes the metadata associated with a digest.
+func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error {
 s.Lock()
 defer s.Unlock()

-return os.RemoveAll(filepath.Join(s.metadataDir(id), key))
+return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key))
 }
@@ -128,7 +128,7 @@ func testMetadataGetSet(t *testing.T, store StoreBackend) {
 }

 tcases := []struct {
-id ID
+id digest.Digest
 key string
 value []byte
 }{

@@ -158,12 +158,12 @@ func testMetadataGetSet(t *testing.T, store StoreBackend) {
 }

 id3 := digest.FromBytes([]byte("baz"))
-err = store.SetMetadata(ID(id3), "tkey", []byte("tval"))
+err = store.SetMetadata(id3, "tkey", []byte("tval"))
 if err == nil {
 t.Fatal("Expected error for setting metadata for unknown ID.")
 }

-_, err = store.GetMetadata(ID(id3), "tkey")
+_, err = store.GetMetadata(id3, "tkey")
 if err == nil {
 t.Fatal("Expected error for getting metadata for unknown ID.")
 }

@@ -232,7 +232,7 @@ func TestFSInvalidWalker(t *testing.T) {
 }

 n := 0
-err = fs.Walk(func(id ID) error {
+err = fs.Walk(func(id digest.Digest) error {
 if id != fooID {
 t.Fatalf("Invalid walker ID %q, expected %q", id, fooID)
 }

@@ -250,10 +250,10 @@ func TestFSInvalidWalker(t *testing.T) {
 func testGetSet(t *testing.T, store StoreBackend) {
 type tcase struct {
 input []byte
-expected ID
+expected digest.Digest
 }
 tcases := []tcase{
-{[]byte("foobar"), ID("sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")},
+{[]byte("foobar"), digest.Digest("sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")},
 }

 randomInput := make([]byte, 8*1024)

@@ -269,7 +269,7 @@ func testGetSet(t *testing.T, store StoreBackend) {
 }
 tcases = append(tcases, tcase{
 input: randomInput,
-expected: ID("sha256:" + hex.EncodeToString(h.Sum(nil))),
+expected: digest.Digest("sha256:" + hex.EncodeToString(h.Sum(nil))),
 })

 for _, tc := range tcases {

@@ -299,7 +299,7 @@ func testGetSet(t *testing.T, store StoreBackend) {
 }
 }

-for _, key := range []ID{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} {
+for _, key := range []digest.Digest{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} {
 _, err := store.Get(key)
 if err == nil {
 t.Fatalf("Expected error for ID %q.", key)

@@ -352,11 +352,11 @@ func testWalker(t *testing.T, store StoreBackend) {
 t.Fatal(err)
 }

-tcases := make(map[ID]struct{})
+tcases := make(map[digest.Digest]struct{})
 tcases[id] = struct{}{}
 tcases[id2] = struct{}{}
 n := 0
-err = store.Walk(func(id ID) error {
+err = store.Walk(func(id digest.Digest) error {
 delete(tcases, id)
 n++
 return nil

@@ -373,9 +373,9 @@ func testWalker(t *testing.T, store StoreBackend) {
 }

 // stop on error
-tcases = make(map[ID]struct{})
+tcases = make(map[digest.Digest]struct{})
 tcases[id] = struct{}{}
-err = store.Walk(func(id ID) error {
+err = store.Walk(func(id digest.Digest) error {
 return errors.New("")
 })
 if err == nil {
@@ -14,7 +14,17 @@ import (
 type ID digest.Digest

 func (id ID) String() string {
-return digest.Digest(id).String()
+return id.Digest().String()
 }

+// Digest converts ID into a digest
+func (id ID) Digest() digest.Digest {
+return digest.Digest(id)
+}
+
+// IDFromDigest creates an ID from a digest
+func IDFromDigest(digest digest.Digest) ID {
+return ID(digest)
+}
+
 // V1Image stores the V1 image configuration.

@@ -72,9 +82,9 @@ func (img *Image) ID() ID {
 return img.computedID
 }

-// ImageID stringizes ID.
+// ImageID stringifies ID.
 func (img *Image) ImageID() string {
-return string(img.ID())
+return img.ID().String()
 }

 // RunConfig returns the image's container config.
@@ -61,10 +61,10 @@ func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) {
 }

 func (is *store) restore() error {
-err := is.fs.Walk(func(id ID) error {
-img, err := is.Get(id)
+err := is.fs.Walk(func(dgst digest.Digest) error {
+img, err := is.Get(IDFromDigest(dgst))
 if err != nil {
-logrus.Errorf("invalid image %v, %v", id, err)
+logrus.Errorf("invalid image %v, %v", dgst, err)
 return nil
 }
 var l layer.Layer

@@ -74,7 +74,7 @@ func (is *store) restore() error {
 return err
 }
 }
-if err := is.digestSet.Add(digest.Digest(id)); err != nil {
+if err := is.digestSet.Add(dgst); err != nil {
 return err
 }

@@ -83,7 +83,7 @@ func (is *store) restore() error {
 children: make(map[ID]struct{}),
 }

-is.images[ID(id)] = imageMeta
+is.images[IDFromDigest(dgst)] = imageMeta

 return nil
 })

@@ -131,7 +131,7 @@ func (is *store) Create(config []byte) (ID, error) {
 if err != nil {
 return "", err
 }
-imageID := ID(dgst)
+imageID := IDFromDigest(dgst)

 is.Lock()
 defer is.Unlock()

@@ -156,7 +156,7 @@ func (is *store) Create(config []byte) (ID, error) {
 }

 is.images[imageID] = imageMeta
-if err := is.digestSet.Add(digest.Digest(imageID)); err != nil {
+if err := is.digestSet.Add(imageID.Digest()); err != nil {
 delete(is.images, imageID)
 return "", err
 }

@@ -175,13 +175,13 @@ func (is *store) Search(term string) (ID, error) {
 }
 return "", err
 }
-return ID(dgst), nil
+return IDFromDigest(dgst), nil
 }

 func (is *store) Get(id ID) (*Image, error) {
 // todo: Check if image is in images
 // todo: Detect manual insertions and start using them
-config, err := is.fs.Get(id)
+config, err := is.fs.Get(id.Digest())
 if err != nil {
 return nil, err
 }

@@ -209,17 +209,17 @@ func (is *store) Delete(id ID) ([]layer.Metadata, error) {
 return nil, fmt.Errorf("unrecognized image ID %s", id.String())
 }
 for id := range imageMeta.children {
-is.fs.DeleteMetadata(id, "parent")
+is.fs.DeleteMetadata(id.Digest(), "parent")
 }
 if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil {
 delete(is.images[parent].children, id)
 }

-if err := is.digestSet.Remove(digest.Digest(id)); err != nil {
+if err := is.digestSet.Remove(id.Digest()); err != nil {
 logrus.Errorf("error removing %s from digest set: %q", id, err)
 }
 delete(is.images, id)
-is.fs.Delete(id)
+is.fs.Delete(id.Digest())

 if imageMeta.layer != nil {
 return is.ls.Release(imageMeta.layer)

@@ -238,11 +238,11 @@ func (is *store) SetParent(id, parent ID) error {
 delete(is.images[parent].children, id)
 }
 parentMeta.children[id] = struct{}{}
-return is.fs.SetMetadata(id, "parent", []byte(parent))
+return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent))
 }

 func (is *store) GetParent(id ID) (ID, error) {
-d, err := is.fs.GetMetadata(id, "parent")
+d, err := is.fs.GetMetadata(id.Digest(), "parent")
 if err != nil {
 return "", err
 }
@@ -11,6 +11,7 @@ import (

 "github.com/Sirupsen/logrus"
 "github.com/docker/distribution"
+"github.com/docker/distribution/digest"
 "github.com/docker/docker/image"
 "github.com/docker/docker/image/v1"
 "github.com/docker/docker/layer"

@@ -123,7 +124,7 @@ func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool)
 if !ok {
 return fmt.Errorf("invalid tag %q", repoTag)
 }
-l.setLoadedTag(ref, imgID, outStream)
+l.setLoadedTag(ref, imgID.Digest(), outStream)
 outStream.Write([]byte(fmt.Sprintf("Loaded image: %s\n", ref)))
 imageRefCount++
 }

@@ -195,7 +196,7 @@ func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string,
 return l.ls.Register(inflatedLayerData, rootFS.ChainID())
 }

-func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID image.ID, outStream io.Writer) error {
+func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID digest.Digest, outStream io.Writer) error {
 if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID {
 fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags
 }

@@ -253,7 +254,7 @@ func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOut
 if err != nil {
 return err
 }
-l.setLoadedTag(ref, imgID, outStream)
+l.setLoadedTag(ref, imgID.Digest(), outStream)
 }
 }

@@ -77,11 +77,11 @@ func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor,
 return nil, err
 }
 if id != "" {
-_, err := l.is.Get(image.ID(id))
+_, err := l.is.Get(image.IDFromDigest(id))
 if err != nil {
 return nil, err
 }
-addAssoc(image.ID(id), nil)
+addAssoc(image.IDFromDigest(id), nil)
 continue
 }
 if ref.Name() == string(digest.Canonical) {

@@ -95,7 +95,7 @@ func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor,
 if reference.IsNameOnly(ref) {
 assocs := l.rs.ReferencesByName(ref)
 for _, assoc := range assocs {
-addAssoc(assoc.ImageID, assoc.Ref)
+addAssoc(image.IDFromDigest(assoc.ID), assoc.Ref)
 }
 if len(assocs) == 0 {
 imgID, err := l.is.Search(name)

@@ -106,11 +106,11 @@ func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor,
 }
 continue
 }
-var imgID image.ID
-if imgID, err = l.rs.Get(ref); err != nil {
+id, err = l.rs.Get(ref)
+if err != nil {
 return nil, err
 }
-addAssoc(imgID, ref)
+addAssoc(image.IDFromDigest(id), ref)

 }
 return imgDescr, nil

@@ -155,7 +155,7 @@ func (s *saveSession) save(outStream io.Writer) error {
 }

 manifest = append(manifest, manifestItem{
-Config: digest.Digest(id).Hex() + ".json",
+Config: id.Digest().Hex() + ".json",
 RepoTags: repoTags,
 Layers: layers,
 LayerSources: foreignSrcs,

@@ -264,7 +264,7 @@ func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Desc
 }
 }

-configFile := filepath.Join(s.outDir, digest.Digest(id).Hex()+".json")
+configFile := filepath.Join(s.outDir, id.Digest().Hex()+".json")
 if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil {
 return nil, err
 }
@@ -294,8 +294,8 @@ func migrateContainers(root string, ls graphIDMounter, is image.Store, imageMapp
 }

 type refAdder interface {
-AddTag(ref reference.Named, id image.ID, force bool) error
-AddDigest(ref reference.Canonical, id image.ID, force bool) error
+AddTag(ref reference.Named, id digest.Digest, force bool) error
+AddDigest(ref reference.Canonical, id digest.Digest, force bool) error
 }

 func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image.ID) error {

@@ -336,7 +336,7 @@ func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image
 logrus.Errorf("migrate tags: invalid digest %q, %q", dgst, err)
 continue
 }
-if err := rs.AddDigest(canonical, strongID, false); err != nil {
+if err := rs.AddDigest(canonical, strongID.Digest(), false); err != nil {
 logrus.Errorf("can't migrate digest %q for %q, err: %q", ref.String(), strongID, err)
 }
 } else {

@@ -345,7 +345,7 @@ func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image
 logrus.Errorf("migrate tags: invalid tag %q, %q", tag, err)
 continue
 }
-if err := rs.AddTag(tagRef, strongID, false); err != nil {
+if err := rs.AddTag(tagRef, strongID.Digest(), false); err != nil {
 logrus.Errorf("can't migrate tag %q for %q, err: %q", ref.String(), strongID, err)
 }
 }

@@ -333,14 +333,14 @@ type mockTagAdder struct {
 refs map[string]string
 }

-func (t *mockTagAdder) AddTag(ref reference.Named, id image.ID, force bool) error {
+func (t *mockTagAdder) AddTag(ref reference.Named, id digest.Digest, force bool) error {
 if t.refs == nil {
 t.refs = make(map[string]string)
 }
 t.refs[ref.String()] = id.String()
 return nil
 }
-func (t *mockTagAdder) AddDigest(ref reference.Canonical, id image.ID, force bool) error {
+func (t *mockTagAdder) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error {
 return t.AddTag(ref, id, force)
 }

@@ -10,7 +10,6 @@ import (
 "sync"

 "github.com/docker/distribution/digest"
-"github.com/docker/docker/image"
 "github.com/docker/docker/pkg/ioutils"
 )

@@ -22,18 +21,18 @@ var (

 // An Association is a tuple associating a reference with an image ID.
 type Association struct {
-Ref Named
-ImageID image.ID
+Ref Named
+ID digest.Digest
 }

 // Store provides the set of methods which can operate on a tag store.
 type Store interface {
-References(id image.ID) []Named
+References(id digest.Digest) []Named
 ReferencesByName(ref Named) []Association
-AddTag(ref Named, id image.ID, force bool) error
-AddDigest(ref Canonical, id image.ID, force bool) error
+AddTag(ref Named, id digest.Digest, force bool) error
+AddDigest(ref Canonical, id digest.Digest, force bool) error
 Delete(ref Named) (bool, error)
-Get(ref Named) (image.ID, error)
+Get(ref Named) (digest.Digest, error)
 }

 type store struct {
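With the Store interface above, references resolve to plain digests rather than image IDs, so image-aware callers convert at the edges, as daemon.TagImageWithReference and daemon.GetImageID do elsewhere in this diff. A sketch of that pattern (illustrative only; rs is any reference.Store, with the reference and image packages imported as usual):

// tagAndResolve is a hypothetical helper, not part of this diff.
func tagAndResolve(rs reference.Store, ref reference.Named, imgID image.ID) (image.ID, error) {
	if err := rs.AddTag(ref, imgID.Digest(), true); err != nil {
		return "", err
	}
	dgst, err := rs.Get(ref)
	if err != nil {
		return "", err
	}
	return image.IDFromDigest(dgst), nil
}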
@@ -45,12 +44,12 @@ type store struct {
 Repositories map[string]repository
 // referencesByIDCache is a cache of references indexed by ID, to speed
 // up References.
-referencesByIDCache map[image.ID]map[string]Named
+referencesByIDCache map[digest.Digest]map[string]Named
 }

-// Repository maps tags to image IDs. The key is a stringified Reference,
+// Repository maps tags to digests. The key is a stringified Reference,
 // including the repository name.
-type repository map[string]image.ID
+type repository map[string]digest.Digest

 type lexicalRefs []Named

@@ -75,7 +74,7 @@ func NewReferenceStore(jsonPath string) (Store, error) {
 store := &store{
 jsonPath: abspath,
 Repositories: make(map[string]repository),
-referencesByIDCache: make(map[image.ID]map[string]Named),
+referencesByIDCache: make(map[digest.Digest]map[string]Named),
 }
 // Load the json file if it exists, otherwise create it.
 if err := store.reload(); os.IsNotExist(err) {

@@ -90,7 +89,7 @@ func NewReferenceStore(jsonPath string) (Store, error) {

 // AddTag adds a tag reference to the store. If force is set to true, existing
 // references can be overwritten. This only works for tags, not digests.
-func (store *store) AddTag(ref Named, id image.ID, force bool) error {
+func (store *store) AddTag(ref Named, id digest.Digest, force bool) error {
 if _, isCanonical := ref.(Canonical); isCanonical {
 return errors.New("refusing to create a tag with a digest reference")
 }

@@ -98,11 +97,11 @@ func (store *store) AddTag(ref Named, id image.ID, force bool) error {
 }

 // AddDigest adds a digest reference to the store.
-func (store *store) AddDigest(ref Canonical, id image.ID, force bool) error {
+func (store *store) AddDigest(ref Canonical, id digest.Digest, force bool) error {
 return store.addReference(ref, id, force)
 }

-func (store *store) addReference(ref Named, id image.ID, force bool) error {
+func (store *store) addReference(ref Named, id digest.Digest, force bool) error {
 if ref.Name() == string(digest.Canonical) {
 return errors.New("refusing to create an ambiguous tag using digest algorithm as name")
 }

@@ -112,7 +111,7 @@ func (store *store) addReference(ref Named, id image.ID, force bool) error {

 repository, exists := store.Repositories[ref.Name()]
 if !exists || repository == nil {
-repository = make(map[string]image.ID)
+repository = make(map[string]digest.Digest)
 store.Repositories[ref.Name()] = repository
 }

@@ -179,8 +178,8 @@ func (store *store) Delete(ref Named) (bool, error) {
 return false, ErrDoesNotExist
 }

-// Get retrieves an item from the store by
-func (store *store) Get(ref Named) (image.ID, error) {
+// Get retrieves an item from the store by reference
+func (store *store) Get(ref Named) (digest.Digest, error) {
 ref = WithDefaultTag(ref)

 store.mu.RLock()

@@ -199,9 +198,9 @@ func (store *store) Get(ref Named) (image.ID, error) {
 return id, nil
 }

-// References returns a slice of references to the given image ID. The slice
-// will be nil if there are no references to this image ID.
-func (store *store) References(id image.ID) []Named {
+// References returns a slice of references to the given ID. The slice
+// will be nil if there are no references to this ID.
+func (store *store) References(id digest.Digest) []Named {
 store.mu.RLock()
 defer store.mu.RUnlock()

@@ -240,8 +239,8 @@ func (store *store) ReferencesByName(ref Named) []Association {
 }
 associations = append(associations,
 Association{
-Ref: ref,
-ImageID: refID,
+Ref: ref,
+ID: refID,
 })
 }
@@ -8,11 +8,11 @@ import (
 "strings"
 "testing"

-"github.com/docker/docker/image"
+"github.com/docker/distribution/digest"
 )

 var (
-saveLoadTestCases = map[string]image.ID{
+saveLoadTestCases = map[string]digest.Digest{
 "registry:5000/foobar:HEAD": "sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6",
 "registry:5000/foobar:alternate": "sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793",
 "registry:5000/foobar:latest": "sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b",

@@ -115,9 +115,9 @@ func TestAddDeleteGet(t *testing.T) {
 t.Fatalf("error creating tag store: %v", err)
 }

-testImageID1 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9c")
-testImageID2 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9d")
-testImageID3 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9e")
+testImageID1 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9c")
+testImageID2 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9d")
+testImageID3 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9e")

 // Try adding a reference with no tag or digest
 nameOnly, err := WithName("username/repo")

@@ -276,19 +276,19 @@ func TestAddDeleteGet(t *testing.T) {
 if associations[0].Ref.String() != ref3.String() {
 t.Fatalf("unexpected reference: %v", associations[0].Ref.String())
 }
-if associations[0].ImageID != testImageID1 {
+if associations[0].ID != testImageID1 {
 t.Fatalf("unexpected reference: %v", associations[0].Ref.String())
 }
 if associations[1].Ref.String() != ref1.String() {
 t.Fatalf("unexpected reference: %v", associations[1].Ref.String())
 }
-if associations[1].ImageID != testImageID1 {
+if associations[1].ID != testImageID1 {
 t.Fatalf("unexpected reference: %v", associations[1].Ref.String())
 }
 if associations[2].Ref.String() != ref2.String() {
 t.Fatalf("unexpected reference: %v", associations[2].Ref.String())
 }
-if associations[2].ImageID != testImageID2 {
+if associations[2].ID != testImageID2 {
 t.Fatalf("unexpected reference: %v", associations[2].Ref.String())
 }

@@ -331,7 +331,7 @@ func TestInvalidTags(t *testing.T) {
 if err != nil {
 t.Fatalf("error creating tag store: %v", err)
 }
-id := image.ID("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6")
+id := digest.Digest("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6")

 // sha256 as repo name
 ref, err := ParseNamed("sha256:abc")