
Merge pull request #19109 from BrianBland/crossRepositoryPush

Cross repository push
Tibor Vass 9 years ago
parent
commit
94b2d56690

+ 1 - 1
Dockerfile

@@ -155,7 +155,7 @@ RUN set -x \
 # both. This allows integration-cli tests to cover push/pull with both schema1
 # and schema2 manifests.
 ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
-ENV REGISTRY_COMMIT a7ae88da459b98b481a245e5b1750134724ac67d
+ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \

+ 0 - 100
distribution/metadata/blobsum_service.go

@@ -1,100 +0,0 @@
-package metadata
-
-import (
-	"encoding/json"
-
-	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/layer"
-)
-
-// BlobSumService maps layer IDs to a set of known blobsums for
-// the layer.
-type BlobSumService struct {
-	store Store
-}
-
-// maxBlobSums is the number of blobsums to keep per layer DiffID.
-const maxBlobSums = 5
-
-// NewBlobSumService creates a new blobsum mapping service.
-func NewBlobSumService(store Store) *BlobSumService {
-	return &BlobSumService{
-		store: store,
-	}
-}
-
-func (blobserv *BlobSumService) diffIDNamespace() string {
-	return "blobsum-storage"
-}
-
-func (blobserv *BlobSumService) blobSumNamespace() string {
-	return "blobsum-lookup"
-}
-
-func (blobserv *BlobSumService) diffIDKey(diffID layer.DiffID) string {
-	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
-}
-
-func (blobserv *BlobSumService) blobSumKey(blobsum digest.Digest) string {
-	return string(blobsum.Algorithm()) + "/" + blobsum.Hex()
-}
-
-// GetBlobSums finds the blobsums associated with a layer DiffID.
-func (blobserv *BlobSumService) GetBlobSums(diffID layer.DiffID) ([]digest.Digest, error) {
-	jsonBytes, err := blobserv.store.Get(blobserv.diffIDNamespace(), blobserv.diffIDKey(diffID))
-	if err != nil {
-		return nil, err
-	}
-
-	var blobsums []digest.Digest
-	if err := json.Unmarshal(jsonBytes, &blobsums); err != nil {
-		return nil, err
-	}
-
-	return blobsums, nil
-}
-
-// GetDiffID finds a layer DiffID from a blobsum hash.
-func (blobserv *BlobSumService) GetDiffID(blobsum digest.Digest) (layer.DiffID, error) {
-	diffIDBytes, err := blobserv.store.Get(blobserv.blobSumNamespace(), blobserv.blobSumKey(blobsum))
-	if err != nil {
-		return layer.DiffID(""), err
-	}
-
-	return layer.DiffID(diffIDBytes), nil
-}
-
-// Add associates a blobsum with a layer DiffID. If too many blobsums are
-// present, the oldest one is dropped.
-func (blobserv *BlobSumService) Add(diffID layer.DiffID, blobsum digest.Digest) error {
-	oldBlobSums, err := blobserv.GetBlobSums(diffID)
-	if err != nil {
-		oldBlobSums = nil
-	}
-	newBlobSums := make([]digest.Digest, 0, len(oldBlobSums)+1)
-
-	// Copy all other blobsums to new slice
-	for _, oldSum := range oldBlobSums {
-		if oldSum != blobsum {
-			newBlobSums = append(newBlobSums, oldSum)
-		}
-	}
-
-	newBlobSums = append(newBlobSums, blobsum)
-
-	if len(newBlobSums) > maxBlobSums {
-		newBlobSums = newBlobSums[len(newBlobSums)-maxBlobSums:]
-	}
-
-	jsonBytes, err := json.Marshal(newBlobSums)
-	if err != nil {
-		return err
-	}
-
-	err = blobserv.store.Set(blobserv.diffIDNamespace(), blobserv.diffIDKey(diffID), jsonBytes)
-	if err != nil {
-		return err
-	}
-
-	return blobserv.store.Set(blobserv.blobSumNamespace(), blobserv.blobSumKey(blobsum), []byte(diffID))
-}

+ 0 - 105
distribution/metadata/blobsum_service_test.go

@@ -1,105 +0,0 @@
-package metadata
-
-import (
-	"io/ioutil"
-	"os"
-	"reflect"
-	"testing"
-
-	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/layer"
-)
-
-func TestBlobSumService(t *testing.T) {
-	tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test")
-	if err != nil {
-		t.Fatalf("could not create temp dir: %v", err)
-	}
-	defer os.RemoveAll(tmpDir)
-
-	metadataStore, err := NewFSMetadataStore(tmpDir)
-	if err != nil {
-		t.Fatalf("could not create metadata store: %v", err)
-	}
-	blobSumService := NewBlobSumService(metadataStore)
-
-	testVectors := []struct {
-		diffID   layer.DiffID
-		blobsums []digest.Digest
-	}{
-		{
-			diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
-			blobsums: []digest.Digest{
-				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
-			},
-		},
-		{
-			diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
-			blobsums: []digest.Digest{
-				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
-				digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e"),
-			},
-		},
-		{
-			diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
-			blobsums: []digest.Digest{
-				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
-				digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e"),
-				digest.Digest("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"),
-				digest.Digest("sha256:8902a7ca89aabbb868835260912159026637634090dd8899eee969523252236e"),
-				digest.Digest("sha256:c84364306344ccc48532c52ff5209236273525231dddaaab53262322352883aa"),
-				digest.Digest("sha256:aa7583bbc87532a8352bbb72520a821b3623523523a8352523a52352aaa888fe"),
-			},
-		},
-	}
-
-	// Set some associations
-	for _, vec := range testVectors {
-		for _, blobsum := range vec.blobsums {
-			err := blobSumService.Add(vec.diffID, blobsum)
-			if err != nil {
-				t.Fatalf("error calling Set: %v", err)
-			}
-		}
-	}
-
-	// Check the correct values are read back
-	for _, vec := range testVectors {
-		blobsums, err := blobSumService.GetBlobSums(vec.diffID)
-		if err != nil {
-			t.Fatalf("error calling Get: %v", err)
-		}
-		expectedBlobsums := len(vec.blobsums)
-		if expectedBlobsums > 5 {
-			expectedBlobsums = 5
-		}
-		if !reflect.DeepEqual(blobsums, vec.blobsums[len(vec.blobsums)-expectedBlobsums:len(vec.blobsums)]) {
-			t.Fatal("Get returned incorrect layer ID")
-		}
-	}
-
-	// Test GetBlobSums on a nonexistent entry
-	_, err = blobSumService.GetBlobSums(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
-	if err == nil {
-		t.Fatal("expected error looking up nonexistent entry")
-	}
-
-	// Test GetDiffID on a nonexistent entry
-	_, err = blobSumService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
-	if err == nil {
-		t.Fatal("expected error looking up nonexistent entry")
-	}
-
-	// Overwrite one of the entries and read it back
-	err = blobSumService.Add(testVectors[1].diffID, testVectors[0].blobsums[0])
-	if err != nil {
-		t.Fatalf("error calling Add: %v", err)
-	}
-	diffID, err := blobSumService.GetDiffID(testVectors[0].blobsums[0])
-	if err != nil {
-		t.Fatalf("error calling GetDiffID: %v", err)
-	}
-	if diffID != testVectors[1].diffID {
-		t.Fatal("GetDiffID returned incorrect diffID")
-	}
-}

+ 12 - 0
distribution/metadata/metadata.go

@@ -15,6 +15,8 @@ type Store interface {
 	Get(namespace string, key string) ([]byte, error)
 	// Set writes data indexed by namespace and key.
 	Set(namespace, key string, value []byte) error
+	// Delete removes data indexed by namespace and key.
+	Delete(namespace, key string) error
 }
 
 // FSMetadataStore uses the filesystem to associate metadata with layer and
@@ -63,3 +65,13 @@ func (store *FSMetadataStore) Set(namespace, key string, value []byte) error {
 	}
 	return os.Rename(tempFilePath, path)
 }
+
+// Delete removes data indexed by namespace and key. The data file named after
+// the key, stored in the namespace's directory is deleted.
+func (store *FSMetadataStore) Delete(namespace, key string) error {
+	store.Lock()
+	defer store.Unlock()
+
+	path := store.path(namespace, key)
+	return os.Remove(path)
+}
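
The Store interface now carries three methods: Get, Set, and the new Delete. For context, a minimal in-memory implementation satisfying that contract might look like the sketch below; the memStore type is hypothetical and only illustrates the interface that FSMetadataStore implements on the filesystem.

package metadata

import (
	"fmt"
	"sync"
)

// memStore is a hypothetical in-memory Store, shown only to illustrate the
// Get/Set/Delete contract that FSMetadataStore implements on disk.
type memStore struct {
	mu   sync.Mutex
	data map[string][]byte
}

func newMemStore() *memStore {
	return &memStore{data: make(map[string][]byte)}
}

func (s *memStore) fullKey(namespace, key string) string {
	return namespace + "/" + key
}

// Get retrieves data indexed by namespace and key.
func (s *memStore) Get(namespace, key string) ([]byte, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	v, ok := s.data[s.fullKey(namespace, key)]
	if !ok {
		return nil, fmt.Errorf("%s/%s: not found", namespace, key)
	}
	return v, nil
}

// Set writes data indexed by namespace and key.
func (s *memStore) Set(namespace, key string, value []byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.data[s.fullKey(namespace, key)] = value
	return nil
}

// Delete removes data indexed by namespace and key.
func (s *memStore) Delete(namespace, key string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.data, s.fullKey(namespace, key))
	return nil
}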

+ 137 - 0
distribution/metadata/v2_metadata_service.go

@@ -0,0 +1,137 @@
+package metadata
+
+import (
+	"encoding/json"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/layer"
+)
+
+// V2MetadataService maps layer IDs to a set of known metadata for
+// the layer.
+type V2MetadataService struct {
+	store Store
+}
+
+// V2Metadata contains the digest and source repository information for a layer.
+type V2Metadata struct {
+	Digest           digest.Digest
+	SourceRepository string
+}
+
+// maxMetadata is the number of metadata entries to keep per layer DiffID.
+const maxMetadata = 50
+
+// NewV2MetadataService creates a new diff ID to v2 metadata mapping service.
+func NewV2MetadataService(store Store) *V2MetadataService {
+	return &V2MetadataService{
+		store: store,
+	}
+}
+
+func (serv *V2MetadataService) diffIDNamespace() string {
+	return "v2metadata-by-diffid"
+}
+
+func (serv *V2MetadataService) digestNamespace() string {
+	return "diffid-by-digest"
+}
+
+func (serv *V2MetadataService) diffIDKey(diffID layer.DiffID) string {
+	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
+}
+
+func (serv *V2MetadataService) digestKey(dgst digest.Digest) string {
+	return string(dgst.Algorithm()) + "/" + dgst.Hex()
+}
+
+// GetMetadata finds the metadata associated with a layer DiffID.
+func (serv *V2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) {
+	jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID))
+	if err != nil {
+		return nil, err
+	}
+
+	var metadata []V2Metadata
+	if err := json.Unmarshal(jsonBytes, &metadata); err != nil {
+		return nil, err
+	}
+
+	return metadata, nil
+}
+
+// GetDiffID finds a layer DiffID from a digest.
+func (serv *V2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) {
+	diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst))
+	if err != nil {
+		return layer.DiffID(""), err
+	}
+
+	return layer.DiffID(diffIDBytes), nil
+}
+
+// Add associates metadata with a layer DiffID. If too many metadata entries are
+// present, the oldest one is dropped.
+func (serv *V2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error {
+	oldMetadata, err := serv.GetMetadata(diffID)
+	if err != nil {
+		oldMetadata = nil
+	}
+	newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1)
+
+	// Copy all other metadata to new slice
+	for _, oldMeta := range oldMetadata {
+		if oldMeta != metadata {
+			newMetadata = append(newMetadata, oldMeta)
+		}
+	}
+
+	newMetadata = append(newMetadata, metadata)
+
+	if len(newMetadata) > maxMetadata {
+		newMetadata = newMetadata[len(newMetadata)-maxMetadata:]
+	}
+
+	jsonBytes, err := json.Marshal(newMetadata)
+	if err != nil {
+		return err
+	}
+
+	err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes)
+	if err != nil {
+		return err
+	}
+
+	return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID))
+}
+
+// Remove unassociates a metadata entry from a layer DiffID.
+func (serv *V2MetadataService) Remove(metadata V2Metadata) error {
+	diffID, err := serv.GetDiffID(metadata.Digest)
+	if err != nil {
+		return err
+	}
+	oldMetadata, err := serv.GetMetadata(diffID)
+	if err != nil {
+		oldMetadata = nil
+	}
+	newMetadata := make([]V2Metadata, 0, len(oldMetadata))
+
+	// Copy all other metadata to new slice
+	for _, oldMeta := range oldMetadata {
+		if oldMeta != metadata {
+			newMetadata = append(newMetadata, oldMeta)
+		}
+	}
+
+	if len(newMetadata) == 0 {
+		return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID))
+	}
+
+	jsonBytes, err := json.Marshal(newMetadata)
+	if err != nil {
+		return err
+	}
+
+	return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes)
+}
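
Roughly, the service is used as follows. The store path, the repository name, and the digests below are placeholders picked for illustration (the digests are borrowed from the test vectors in the next file), so treat this as a sketch rather than code from the change itself.

package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution/digest"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/layer"
)

func main() {
	// Illustrative store location only.
	store, err := metadata.NewFSMetadataStore("/tmp/docker-metadata")
	if err != nil {
		log.Fatal(err)
	}
	svc := metadata.NewV2MetadataService(store)

	diffID := layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
	dgst := digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")

	// Record that this DiffID is known to a particular repository as dgst.
	if err := svc.Add(diffID, metadata.V2Metadata{Digest: dgst, SourceRepository: "docker.io/library/busybox"}); err != nil {
		log.Fatal(err)
	}

	// Forward lookup: candidate digests (and their source repositories) for a DiffID.
	entries, err := svc.GetMetadata(diffID)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range entries {
		fmt.Println(m.Digest, m.SourceRepository)
	}

	// Reverse lookup: which DiffID corresponds to a registry digest?
	back, err := svc.GetDiffID(dgst)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back)
}

Compared with the removed BlobSumService, the value stored per layer is now a V2Metadata struct rather than a bare digest, which is what lets the pusher know which repository a digest came from.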

+ 115 - 0
distribution/metadata/v2_metadata_service_test.go

@@ -0,0 +1,115 @@
+package metadata
+
+import (
+	"encoding/hex"
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"reflect"
+	"testing"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/layer"
+)
+
+func TestV2MetadataService(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test")
+	if err != nil {
+		t.Fatalf("could not create temp dir: %v", err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	metadataStore, err := NewFSMetadataStore(tmpDir)
+	if err != nil {
+		t.Fatalf("could not create metadata store: %v", err)
+	}
+	V2MetadataService := NewV2MetadataService(metadataStore)
+
+	tooManyBlobSums := make([]V2Metadata, 100)
+	for i := range tooManyBlobSums {
+		randDigest := randomDigest()
+		tooManyBlobSums[i] = V2Metadata{Digest: randDigest}
+	}
+
+	testVectors := []struct {
+		diffID   layer.DiffID
+		metadata []V2Metadata
+	}{
+		{
+			diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
+			metadata: []V2Metadata{
+				{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
+			},
+		},
+		{
+			diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
+			metadata: []V2Metadata{
+				{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
+				{Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")},
+			},
+		},
+		{
+			diffID:   layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
+			metadata: tooManyBlobSums,
+		},
+	}
+
+	// Set some associations
+	for _, vec := range testVectors {
+		for _, blobsum := range vec.metadata {
+			err := V2MetadataService.Add(vec.diffID, blobsum)
+			if err != nil {
+				t.Fatalf("error calling Set: %v", err)
+			}
+		}
+	}
+
+	// Check the correct values are read back
+	for _, vec := range testVectors {
+		metadata, err := V2MetadataService.GetMetadata(vec.diffID)
+		if err != nil {
+			t.Fatalf("error calling Get: %v", err)
+		}
+		expectedMetadataEntries := len(vec.metadata)
+		if expectedMetadataEntries > 50 {
+			expectedMetadataEntries = 50
+		}
+		if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) {
+			t.Fatal("Get returned incorrect layer ID")
+		}
+	}
+
+	// Test GetMetadata on a nonexistent entry
+	_, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
+	if err == nil {
+		t.Fatal("expected error looking up nonexistent entry")
+	}
+
+	// Test GetDiffID on a nonexistent entry
+	_, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
+	if err == nil {
+		t.Fatal("expected error looking up nonexistent entry")
+	}
+
+	// Overwrite one of the entries and read it back
+	err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0])
+	if err != nil {
+		t.Fatalf("error calling Add: %v", err)
+	}
+	diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest)
+	if err != nil {
+		t.Fatalf("error calling GetDiffID: %v", err)
+	}
+	if diffID != testVectors[1].diffID {
+		t.Fatal("GetDiffID returned incorrect diffID")
+	}
+}
+
+func randomDigest() digest.Digest {
+	b := [32]byte{}
+	for i := 0; i < len(b); i++ {
+		b[i] = byte(rand.Intn(256))
+	}
+	d := hex.EncodeToString(b[:])
+	return digest.Digest("sha256:" + d)
+}

+ 4 - 4
distribution/pull.go

@@ -61,10 +61,10 @@ func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo,
 	switch endpoint.Version {
 	case registry.APIVersion2:
 		return &v2Puller{
-			blobSumService: metadata.NewBlobSumService(imagePullConfig.MetadataStore),
-			endpoint:       endpoint,
-			config:         imagePullConfig,
-			repoInfo:       repoInfo,
+			V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore),
+			endpoint:          endpoint,
+			config:            imagePullConfig,
+			repoInfo:          repoInfo,
 		}, nil
 	case registry.APIVersion1:
 		return &v1Puller{

+ 19 - 16
distribution/pull_v2.go

@@ -33,11 +33,11 @@ import (
 var errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
 
 type v2Puller struct {
-	blobSumService *metadata.BlobSumService
-	endpoint       registry.APIEndpoint
-	config         *ImagePullConfig
-	repoInfo       *registry.RepositoryInfo
-	repo           distribution.Repository
+	V2MetadataService *metadata.V2MetadataService
+	endpoint          registry.APIEndpoint
+	config            *ImagePullConfig
+	repoInfo          *registry.RepositoryInfo
+	repo              distribution.Repository
 	// confirmedV2 is set to true if we confirm we're talking to a v2
 	// registry. This is used to limit fallbacks to the v1 protocol.
 	confirmedV2 bool
@@ -110,9 +110,10 @@ func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (e
 }
 
 type v2LayerDescriptor struct {
-	digest         digest.Digest
-	repo           distribution.Repository
-	blobSumService *metadata.BlobSumService
+	digest            digest.Digest
+	repoInfo          *registry.RepositoryInfo
+	repo              distribution.Repository
+	V2MetadataService *metadata.V2MetadataService
 }
 
 func (ld *v2LayerDescriptor) Key() string {
@@ -124,7 +125,7 @@ func (ld *v2LayerDescriptor) ID() string {
 }
 
 func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
-	return ld.blobSumService.GetDiffID(ld.digest)
+	return ld.V2MetadataService.GetDiffID(ld.digest)
 }
 
 func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
@@ -196,7 +197,7 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre
 
 func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
 	// Cache mapping from this layer's DiffID to the blobsum
-	ld.blobSumService.Add(diffID, ld.digest)
+	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
 }
 
 func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
@@ -333,9 +334,10 @@ func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverif
 		}
 
 		layerDescriptor := &v2LayerDescriptor{
-			digest:         blobSum,
-			repo:           p.repo,
-			blobSumService: p.blobSumService,
+			digest:            blobSum,
+			repoInfo:          p.repoInfo,
+			repo:              p.repo,
+			V2MetadataService: p.V2MetadataService,
 		}
 
 		descriptors = append(descriptors, layerDescriptor)
@@ -398,9 +400,10 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s
 	// to top-most, so that the downloads slice gets ordered correctly.
 	for _, d := range mfst.References() {
 		layerDescriptor := &v2LayerDescriptor{
-			digest:         d.Digest,
-			repo:           p.repo,
-			blobSumService: p.blobSumService,
+			digest:            d.Digest,
+			repo:              p.repo,
+			repoInfo:          p.repoInfo,
+			V2MetadataService: p.V2MetadataService,
 		}
 
 		descriptors = append(descriptors, layerDescriptor)

+ 5 - 5
distribution/push.go

@@ -71,11 +71,11 @@ func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *reg
 	switch endpoint.Version {
 	case registry.APIVersion2:
 		return &v2Pusher{
-			blobSumService: metadata.NewBlobSumService(imagePushConfig.MetadataStore),
-			ref:            ref,
-			endpoint:       endpoint,
-			repoInfo:       repoInfo,
-			config:         imagePushConfig,
+			v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore),
+			ref:               ref,
+			endpoint:          endpoint,
+			repoInfo:          repoInfo,
+			config:            imagePushConfig,
 		}, nil
 	case registry.APIVersion1:
 		return &v1Pusher{

+ 91 - 23
distribution/push_v2.go

@@ -11,6 +11,7 @@ import (
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/manifest/schema1"
 	"github.com/docker/distribution/manifest/schema2"
+	distreference "github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/client"
 	"github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
@@ -34,12 +35,12 @@ type PushResult struct {
 }
 
 type v2Pusher struct {
-	blobSumService *metadata.BlobSumService
-	ref            reference.Named
-	endpoint       registry.APIEndpoint
-	repoInfo       *registry.RepositoryInfo
-	config         *ImagePushConfig
-	repo           distribution.Repository
+	v2MetadataService *metadata.V2MetadataService
+	ref               reference.Named
+	endpoint          registry.APIEndpoint
+	repoInfo          *registry.RepositoryInfo
+	config            *ImagePushConfig
+	repo              distribution.Repository
 
 	// pushState is state built by the Download functions.
 	pushState pushState
@@ -130,9 +131,10 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, ima
 	var descriptors []xfer.UploadDescriptor
 
 	descriptorTemplate := v2PushDescriptor{
-		blobSumService: p.blobSumService,
-		repo:           p.repo,
-		pushState:      &p.pushState,
+		v2MetadataService: p.v2MetadataService,
+		repoInfo:          p.repoInfo,
+		repo:              p.repo,
+		pushState:         &p.pushState,
 	}
 
 	// Loop bounds condition is to avoid pushing the base layer on Windows.
@@ -209,10 +211,11 @@ func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuild
 }
 
 type v2PushDescriptor struct {
-	layer          layer.Layer
-	blobSumService *metadata.BlobSumService
-	repo           distribution.Repository
-	pushState      *pushState
+	layer             layer.Layer
+	v2MetadataService *metadata.V2MetadataService
+	repoInfo          reference.Named
+	repo              distribution.Repository
+	pushState         *pushState
 }
 
 func (pd *v2PushDescriptor) Key() string {
@@ -240,10 +243,10 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 	}
 	pd.pushState.Unlock()
 
-	// Do we have any blobsums associated with this layer's DiffID?
-	possibleBlobsums, err := pd.blobSumService.GetBlobSums(diffID)
+	// Do we have any metadata associated with this layer's DiffID?
+	v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID)
 	if err == nil {
-		descriptor, exists, err := blobSumAlreadyExists(ctx, possibleBlobsums, pd.repo, pd.pushState)
+		descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState)
 		if err != nil {
 			progress.Update(progressOutput, pd.ID(), "Image push failed")
 			return retryOnError(err)
@@ -263,8 +266,69 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 	// then push the blob.
 	bs := pd.repo.Blobs(ctx)
 
+	var mountFrom metadata.V2Metadata
+
+	// Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload
+	for _, metadata := range v2Metadata {
+		sourceRepo, err := reference.ParseNamed(metadata.SourceRepository)
+		if err != nil {
+			continue
+		}
+		if pd.repoInfo.Hostname() == sourceRepo.Hostname() {
+			logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, metadata.Digest, sourceRepo.FullName())
+			mountFrom = metadata
+			break
+		}
+	}
+
+	var createOpts []distribution.BlobCreateOption
+
+	if mountFrom.SourceRepository != "" {
+		namedRef, err := reference.WithName(mountFrom.SourceRepository)
+		if err != nil {
+			return err
+		}
+
+		// TODO (brianbland): We need to construct a reference where the Name is
+		// only the full remote name, so clean this up when distribution has a
+		// richer reference package
+		remoteRef, err := distreference.WithName(namedRef.RemoteName())
+		if err != nil {
+			return err
+		}
+
+		canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest)
+		if err != nil {
+			return err
+		}
+
+		createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
+	}
+
 	// Send the layer
-	layerUpload, err := bs.Create(ctx)
+	layerUpload, err := bs.Create(ctx, createOpts...)
+	switch err := err.(type) {
+	case distribution.ErrBlobMounted:
+		progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())
+
+		pd.pushState.Lock()
+		pd.pushState.confirmedV2 = true
+		pd.pushState.remoteLayers[diffID] = err.Descriptor
+		pd.pushState.Unlock()
+
+		// Cache mapping from this layer's DiffID to the blobsum
+		if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
+			return xfer.DoNotRetry{Err: err}
+		}
+
+		return nil
+	}
+	if mountFrom.SourceRepository != "" {
+		// unable to mount layer from this repository, so this source mapping is no longer valid
+		logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository)
+		pd.v2MetadataService.Remove(mountFrom)
+	}
+
 	if err != nil {
 		return retryOnError(err)
 	}
@@ -300,7 +364,7 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 	progress.Update(progressOutput, pd.ID(), "Pushed")
 
 	// Cache mapping from this layer's DiffID to the blobsum
-	if err := pd.blobSumService.Add(diffID, pushDigest); err != nil {
+	if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
 		return xfer.DoNotRetry{Err: err}
 	}
 
@@ -329,12 +393,16 @@ func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
 	return pd.pushState.remoteLayers[pd.DiffID()]
 }
 
-// blobSumAlreadyExists checks if the registry already know about any of the
-// blobsums passed in the "blobsums" slice. If it finds one that the registry
+// layerAlreadyExists checks if the registry already knows about any of the
+// metadata passed in the "metadata" slice. If it finds one that the registry
 // knows about, it returns the known digest and "true".
-func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
-	for _, dgst := range blobsums {
-		descriptor, err := repo.Blobs(ctx).Stat(ctx, dgst)
+func layerAlreadyExists(ctx context.Context, metadata []metadata.V2Metadata, repoInfo reference.Named, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
+	for _, meta := range metadata {
+		// Only check blobsums that are known to this repository or have an unknown source
+		if meta.SourceRepository != "" && meta.SourceRepository != repoInfo.FullName() {
+			continue
+		}
+		descriptor, err := repo.Blobs(ctx).Stat(ctx, meta.Digest)
 		switch err {
 		case nil:
 			descriptor.MediaType = schema2.MediaTypeLayer
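
Condensed, the new Upload path first looks for a metadata entry whose source repository lives on the same registry host as the push target; if one exists, it asks the registry to mount the blob (via client.WithMountFrom) instead of re-uploading it, and treats a returned distribution.ErrBlobMounted as a completed push of that layer. The helper below restates only the candidate-selection step; pickMountCandidate is a hypothetical name, not a function added by this change.

package distribution

import (
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/reference"
)

// pickMountCandidate returns the first recorded metadata entry whose source
// repository is on the same registry host as the push target, mirroring the
// loop added to (*v2PushDescriptor).Upload above.
func pickMountCandidate(target reference.Named, entries []metadata.V2Metadata) (metadata.V2Metadata, bool) {
	for _, m := range entries {
		src, err := reference.ParseNamed(m.SourceRepository)
		if err != nil {
			// Entries with an empty or unparsable source repository are skipped.
			continue
		}
		if target.Hostname() == src.Hostname() {
			return m, true
		}
	}
	return metadata.V2Metadata{}, false
}

If the mount attempt fails, Upload drops the stale source mapping via v2MetadataService.Remove and falls through to the ordinary upload path, so a bad hint never blocks a push.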

+ 1 - 1
hack/vendor.sh

@@ -45,7 +45,7 @@ clone git github.com/boltdb/bolt v1.1.0
 clone git github.com/miekg/dns d27455715200c7d3e321a1e5cadb27c9ee0b0f02
 
 # get graph and distribution packages
-clone git github.com/docker/distribution a7ae88da459b98b481a245e5b1750134724ac67d
+clone git github.com/docker/distribution cb08de17d74bef86ce6c5abe8b240e282f5750be
 clone git github.com/vbatts/tar-split v0.9.11
 
 # get desired notary commit, might also need to be updated in Dockerfile

+ 52 - 0
integration-cli/docker_cli_push_test.go

@@ -147,6 +147,58 @@ func (s *DockerSchema1RegistrySuite) TestPushEmptyLayer(c *check.C) {
 	testPushEmptyLayer(c)
 }
 
+func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) {
+	sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
+	// tag the image to upload it to the private registry
+	dockerCmd(c, "tag", "busybox", sourceRepoName)
+	// push the image to the registry
+	out1, _, err := dockerCmdWithError("push", sourceRepoName)
+	c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1))
+	// ensure that none of the layers were mounted from another repository during push
+	c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false)
+
+	destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL)
+	// retag the image to upload the same layers to another repo in the same registry
+	dockerCmd(c, "tag", "busybox", destRepoName)
+	// push the image to the registry
+	out2, _, err := dockerCmdWithError("push", destRepoName)
+	c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2))
+	// ensure that layers were mounted from the first repo during push
+	c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, true)
+
+	// ensure that we can pull and run the cross-repo-pushed repository
+	dockerCmd(c, "rmi", destRepoName)
+	dockerCmd(c, "pull", destRepoName)
+	out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world")
+	c.Assert(out3, check.Equals, "hello world")
+}
+
+func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c *check.C) {
+	sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
+	// tag the image to upload it to the private registry
+	dockerCmd(c, "tag", "busybox", sourceRepoName)
+	// push the image to the registry
+	out1, _, err := dockerCmdWithError("push", sourceRepoName)
+	c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1))
+	// ensure that none of the layers were mounted from another repository during push
+	c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false)
+
+	destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL)
+	// retag the image to upload the same layers to another repo in the same registry
+	dockerCmd(c, "tag", "busybox", destRepoName)
+	// push the image to the registry
+	out2, _, err := dockerCmdWithError("push", destRepoName)
+	c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2))
+	// schema1 registry should not support cross-repo layer mounts, so ensure that this does not happen
+	c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, false)
+
+	// ensure that we can pull and run the second pushed repository
+	dockerCmd(c, "rmi", destRepoName)
+	dockerCmd(c, "pull", destRepoName)
+	out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world")
+	c.Assert(out3, check.Equals, "hello world")
+}
+
 func (s *DockerTrustSuite) TestTrustedPush(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL)
 	// tag the image and upload it to the private registry

+ 2 - 2
migrate/v1/migratev1.go

@@ -476,8 +476,8 @@ func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metad
 	if err == nil { // best effort
 		dgst, err := digest.ParseDigest(string(checksum))
 		if err == nil {
-			blobSumService := metadata.NewBlobSumService(ms)
-			blobSumService.Add(layer.DiffID(), dgst)
+			V2MetadataService := metadata.NewV2MetadataService(ms)
+			V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst})
 		}
 	}
 	_, err = ls.Release(layer)

+ 7 - 7
migrate/v1/migratev1_test.go

@@ -210,19 +210,19 @@ func TestMigrateImages(t *testing.T) {
 		t.Fatalf("invalid register count: expected %q, got %q", expected, actual)
 	}
 
-	blobSumService := metadata.NewBlobSumService(ms)
-	blobsums, err := blobSumService.GetBlobSums(layer.EmptyLayer.DiffID())
+	v2MetadataService := metadata.NewV2MetadataService(ms)
+	receivedMetadata, err := v2MetadataService.GetMetadata(layer.EmptyLayer.DiffID())
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	expectedBlobsums := []digest.Digest{
-		"sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57",
-		"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4",
+	expectedMetadata := []metadata.V2Metadata{
+		{Digest: digest.Digest("sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57")},
+		{Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
 	}
 
-	if !reflect.DeepEqual(expectedBlobsums, blobsums) {
-		t.Fatalf("invalid blobsums: expected %q, got %q", expectedBlobsums, blobsums)
+	if !reflect.DeepEqual(expectedMetadata, receivedMetadata) {
+		t.Fatalf("invalid metadata: expected %q, got %q", expectedMetadata, receivedMetadata)
 	}
 
 }

+ 23 - 1
vendor/src/github.com/docker/distribution/blobs.go

@@ -9,6 +9,7 @@ import (
 
 	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/reference"
 )
 
 var (
@@ -40,6 +41,18 @@ func (err ErrBlobInvalidDigest) Error() string {
 		err.Digest, err.Reason)
 }
 
+// ErrBlobMounted returned when a blob is mounted from another repository
+// instead of initiating an upload session.
+type ErrBlobMounted struct {
+	From       reference.Canonical
+	Descriptor Descriptor
+}
+
+func (err ErrBlobMounted) Error() string {
+	return fmt.Sprintf("blob mounted from: %v to: %v",
+		err.From, err.Descriptor)
+}
+
 // Descriptor describes targeted content. Used in conjunction with a blob
 // store, a descriptor can be used to fetch, store and target any kind of
 // blob. The struct also describes the wire protocol format. Fields should
@@ -151,12 +164,21 @@ type BlobIngester interface {
 	// returned handle can be written to and later resumed using an opaque
 	// identifier. With this approach, one can Close and Resume a BlobWriter
 	// multiple times until the BlobWriter is committed or cancelled.
-	Create(ctx context.Context) (BlobWriter, error)
+	Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
 
 	// Resume attempts to resume a write to a blob, identified by an id.
 	Resume(ctx context.Context, id string) (BlobWriter, error)
 }
 
+// BlobCreateOption is a general extensible function argument for blob creation
+// methods. A BlobIngester may choose to honor any or none of the given
+// BlobCreateOptions, which can be specific to the implementation of the
+// BlobIngester receiving them.
+// TODO (brianbland): unify this with ManifestServiceOption in the future
+type BlobCreateOption interface {
+	Apply(interface{}) error
+}
+
 // BlobWriter provides a handle for inserting data into a blob store.
 // Instances should be obtained from BlobWriteService.Writer and
 // BlobWriteService.Resume. If supported by the store, a writer can be
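
BlobCreateOption is deliberately open-ended: a BlobIngester inspects whatever options it recognizes and may ignore or reject the rest. A consumer-defined option can follow the same functional-option pattern the registry client uses for WithMountFrom further down in this diff; the WithLabel option and uploadOptions struct below are purely hypothetical and only demonstrate the shape of such an option.

package blobopts

import (
	"fmt"

	"github.com/docker/distribution"
)

// uploadOptions is a hypothetical options struct that a custom BlobIngester
// could populate from the options passed to Create.
type uploadOptions struct {
	Label string
}

type optionFunc func(interface{}) error

// Apply satisfies distribution.BlobCreateOption.
func (f optionFunc) Apply(v interface{}) error { return f(v) }

// WithLabel returns a BlobCreateOption understood only by ingesters that pass
// a *uploadOptions value to Apply; any other ingester gets an error and can
// decide whether to fail the Create call or ignore the option.
func WithLabel(label string) distribution.BlobCreateOption {
	return optionFunc(func(v interface{}) error {
		opts, ok := v.(*uploadOptions)
		if !ok {
			return fmt.Errorf("unexpected options type: %T", v)
		}
		opts.Label = label
		return nil
	})
}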

+ 1 - 1
vendor/src/github.com/docker/distribution/circle.yml

@@ -11,7 +11,7 @@ machine:
 
   post:
   # go
-    - gvm install go1.5 --prefer-binary --name=stable
+    - gvm install go1.5.3 --prefer-binary --name=stable
 
   environment:
   # Convenient shortcuts to "common" locations

+ 64 - 0
vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go

@@ -1041,6 +1041,70 @@ var routeDescriptors = []RouteDescriptor{
 							deniedResponseDescriptor,
 						},
 					},
+					{
+						Name:        "Mount Blob",
+						Description: "Mount a blob identified by the `mount` parameter from another repository.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							contentLengthZeroHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+						},
+						QueryParameters: []ParameterDescriptor{
+							{
+								Name:        "mount",
+								Type:        "query",
+								Format:      "<digest>",
+								Regexp:      digest.DigestRegexp,
+								Description: `Digest of blob to mount from the source repository.`,
+							},
+							{
+								Name:        "from",
+								Type:        "query",
+								Format:      "<repository name>",
+								Regexp:      reference.NameRegexp,
+								Description: `Name of the source repository.`,
+							},
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob has been mounted in the repository and is available at the provided location.",
+								StatusCode:  http.StatusCreated,
+								Headers: []ParameterDescriptor{
+									{
+										Name:   "Location",
+										Type:   "url",
+										Format: "<blob location>",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:       "Invalid Name or Digest",
+								StatusCode: http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+								},
+							},
+							{
+								Name:        "Not allowed",
+								Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason",
+								StatusCode:  http.StatusMethodNotAllowed,
+								ErrorCodes: []errcode.ErrorCode{
+									errcode.ErrorCodeUnsupported,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+						},
+					},
 				},
 			},
 		},
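
On the wire, the new operation is a POST to /v2/<name>/blobs/uploads/ carrying mount and from query parameters. The snippet below issues such a request by hand: the registry host is a placeholder, authentication is omitted, and the repository names and digest are reused from the tests elsewhere in this change purely for illustration. A 201 Created means the blob was mounted; a registry that cannot or will not mount it generally falls back to opening a normal upload session (202 Accepted), and a pull-through cache answers 405 as described above.

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{
		"mount": {"sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"},
		"from":  {"dockercli/busybox"},
	}
	// registry.example.com is a placeholder host.
	u := "https://registry.example.com/v2/dockercli/crossrepopush/blobs/uploads/?" + q.Encode()

	resp, err := http.Post(u, "", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusCreated:
		// The blob was mounted; Location points at the blob in the target repo.
		fmt.Println("mounted at", resp.Header.Get("Location"))
	case http.StatusAccepted:
		// Mount declined or not possible; a regular upload session was opened.
		fmt.Println("upload session at", resp.Header.Get("Location"))
	default:
		fmt.Println("unexpected status:", resp.Status)
	}
}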

+ 25 - 3
vendor/src/github.com/docker/distribution/registry/client/auth/session.go

@@ -108,6 +108,8 @@ type tokenHandler struct {
 	tokenLock       sync.Mutex
 	tokenCache      string
 	tokenExpiration time.Time
+
+	additionalScopes map[string]struct{}
 }
 
 // tokenScope represents the scope at which a token will be requested.
@@ -145,6 +147,7 @@ func newTokenHandler(transport http.RoundTripper, creds CredentialStore, c clock
 			Scope:    scope,
 			Actions:  actions,
 		},
+		additionalScopes: map[string]struct{}{},
 	}
 }
 
@@ -160,7 +163,15 @@ func (th *tokenHandler) Scheme() string {
 }
 
 func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
-	if err := th.refreshToken(params); err != nil {
+	var additionalScopes []string
+	if fromParam := req.URL.Query().Get("from"); fromParam != "" {
+		additionalScopes = append(additionalScopes, tokenScope{
+			Resource: "repository",
+			Scope:    fromParam,
+			Actions:  []string{"pull"},
+		}.String())
+	}
+	if err := th.refreshToken(params, additionalScopes...); err != nil {
 		return err
 	}
 
@@ -169,11 +180,18 @@ func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]st
 	return nil
 }
 
-func (th *tokenHandler) refreshToken(params map[string]string) error {
+func (th *tokenHandler) refreshToken(params map[string]string, additionalScopes ...string) error {
 	th.tokenLock.Lock()
 	defer th.tokenLock.Unlock()
+	var addedScopes bool
+	for _, scope := range additionalScopes {
+		if _, ok := th.additionalScopes[scope]; !ok {
+			th.additionalScopes[scope] = struct{}{}
+			addedScopes = true
+		}
+	}
 	now := th.clock.Now()
-	if now.After(th.tokenExpiration) {
+	if now.After(th.tokenExpiration) || addedScopes {
 		tr, err := th.fetchToken(params)
 		if err != nil {
 			return err
@@ -223,6 +241,10 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon
 		reqParams.Add("scope", scopeField)
 	}
 
+	for scope := range th.additionalScopes {
+		reqParams.Add("scope", scope)
+	}
+
 	if th.creds != nil {
 		username, password := th.creds.Basic(realmURL)
 		if username != "" && password != "" {
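
The extra scope requested for the ?from= repository uses the usual token scope syntax, resource:name:action[,action]. The sketch below only reproduces that string format; scopeString is a hypothetical helper, not the vendored tokenScope type.

package main

import (
	"fmt"
	"strings"
)

// scopeString formats a token scope the way the auth handler does, e.g.
// "repository:dockercli/busybox:pull" for a cross-repository mount source.
func scopeString(resource, name string, actions ...string) string {
	return fmt.Sprintf("%s:%s:%s", resource, name, strings.Join(actions, ","))
}

func main() {
	// For ?from=dockercli/busybox the handler asks for a pull scope on the
	// source repository in addition to the scopes on the push target.
	fmt.Println(scopeString("repository", "dockercli/busybox", "pull"))
}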

+ 61 - 4
vendor/src/github.com/docker/distribution/registry/client/repository.go

@@ -572,8 +572,57 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut
 	return writer.Commit(ctx, desc)
 }
 
-func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) {
-	u, err := bs.ub.BuildBlobUploadURL(bs.name)
+// createOptions is a collection of blob creation modifiers relevant to general
+// blob storage intended to be configured by the BlobCreateOption.Apply method.
+type createOptions struct {
+	Mount struct {
+		ShouldMount bool
+		From        reference.Canonical
+	}
+}
+
+type optionFunc func(interface{}) error
+
+func (f optionFunc) Apply(v interface{}) error {
+	return f(v)
+}
+
+// WithMountFrom returns a BlobCreateOption which designates that the blob should be
+// mounted from the given canonical reference.
+func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
+	return optionFunc(func(v interface{}) error {
+		opts, ok := v.(*createOptions)
+		if !ok {
+			return fmt.Errorf("unexpected options type: %T", v)
+		}
+
+		opts.Mount.ShouldMount = true
+		opts.Mount.From = ref
+
+		return nil
+	})
+}
+
+func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
+	var opts createOptions
+
+	for _, option := range options {
+		err := option.Apply(&opts)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var values []url.Values
+
+	if opts.Mount.ShouldMount {
+		values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
+	}
+
+	u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
+	if err != nil {
+		return nil, err
+	}
 
 	resp, err := bs.client.Post(u, "", nil)
 	if err != nil {
@@ -581,7 +630,14 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) {
 	}
 	defer resp.Body.Close()
 
-	if SuccessStatus(resp.StatusCode) {
+	switch resp.StatusCode {
+	case http.StatusCreated:
+		desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
+		if err != nil {
+			return nil, err
+		}
+		return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
+	case http.StatusAccepted:
 		// TODO(dmcgowan): Check for invalid UUID
 		uuid := resp.Header.Get("Docker-Upload-UUID")
 		location, err := sanitizeLocation(resp.Header.Get("Location"), u)
@@ -596,8 +652,9 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) {
 			startedAt: time.Now(),
 			location:  location,
 		}, nil
+	default:
+		return nil, HandleErrorResponse(resp)
 	}
-	return nil, HandleErrorResponse(resp)
 }
 
 func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
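
From a caller of the registry client, the extended Create plus the ErrBlobMounted sentinel can be handled roughly as follows. Repository construction is elided: repo and canonicalRef stand in for values obtained elsewhere, and cancelling the fallback upload session is only for illustration.

package clientexample

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
)

// tryMount attempts a cross-repository mount and reports whether the blob
// ended up in the target repository without an upload.
func tryMount(ctx context.Context, repo distribution.Repository, canonicalRef reference.Canonical) (bool, error) {
	bw, err := repo.Blobs(ctx).Create(ctx, client.WithMountFrom(canonicalRef))
	switch err := err.(type) {
	case nil:
		// 202 Accepted: the registry opened a regular upload session instead.
		fmt.Println("upload session:", bw.ID())
		return false, bw.Cancel(ctx) // illustrative: abandon the session
	case distribution.ErrBlobMounted:
		// 201 Created: the blob now exists in the target repository.
		fmt.Println("mounted from", err.From.Name(), "as", err.Descriptor.Digest)
		return true, nil
	default:
		return false, err
	}
}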