
Change push to use manifest builder

Currently this always uses the schema1 manifest builder. Later, it will
be changed to attempt schema2 first, and fall back when necessary.

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
Aaron Lehmann, 9 years ago
parent
commit f33fa1b8d3
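
For orientation before the per-file diff, here is a condensed sketch of the flow this commit introduces in pushV2Tag. The wrapper function, its name, and its parameter list are invented here for illustration only (it assumes the packages already imported by push_v2.go, plus libtrust for the daemon's trust key); the builder calls themselves mirror the push_v2.go changes below, with error handling and progress output trimmed.

// Illustrative sketch only: this wrapper is not part of the commit. It
// condenses the new manifest-building flow in pushV2Tag (distribution/push_v2.go).
func buildAndPushManifest(ctx context.Context, repo distribution.Repository, trustKey libtrust.PrivateKey,
	tag string, imgJSON []byte, descriptors []*v2PushDescriptor, manSvc distribution.ManifestService) error {
	// The schema1 builder replaces the hand-rolled CreateV2Manifest helper.
	builder := schema1.NewConfigManifestBuilder(repo.Blobs(ctx), trustKey, repo.Name(), tag, imgJSON)

	// descriptors is in reverse order; iterate backwards so the layer
	// references are appended in the order the manifest expects. Each
	// descriptor reports its digest and size through Descriptor(), which is
	// populated during upload (see pushState.remoteLayers below).
	for i := len(descriptors) - 1; i >= 0; i-- {
		if err := builder.AppendReference(descriptors[i]); err != nil {
			return err
		}
	}

	// Build assembles the manifest; the schema1 builder also signs it with
	// the daemon's trust key.
	manifest, err := builder.Build(ctx)
	if err != nil {
		return err
	}

	// Push the manifest, attaching the tag when one is known.
	if tag != "" {
		_, err = manSvc.Put(ctx, manifest, client.WithTag(tag))
		return err
	}
	_, err = manSvc.Put(ctx, manifest)
	return err
}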

+ 0 - 2
distribution/push.go

@@ -7,7 +7,6 @@ import (
 	"io"

 	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
 	"github.com/docker/docker/image"
@@ -77,7 +76,6 @@ func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *reg
 			endpoint:       endpoint,
 			repoInfo:       repoInfo,
 			config:         imagePushConfig,
-			layersPushed:   pushMap{layersPushed: make(map[digest.Digest]bool)},
 		}, nil
 	case registry.APIVersion1:
 		return &v1Pusher{

+ 88 - 195
distribution/push_v2.go

@@ -1,22 +1,18 @@
 package distribution

 import (
-	"encoding/json"
-	"errors"
 	"fmt"
 	"io"
 	"sync"
-	"time"

 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/manifest"
 	"github.com/docker/distribution/manifest/schema1"
+	"github.com/docker/distribution/manifest/schema2"
+	"github.com/docker/distribution/registry/client"
 	"github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/image/v1"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/progress"
@@ -43,31 +39,34 @@ type v2Pusher struct {
 	config         *ImagePushConfig
 	repo           distribution.Repository

-	// confirmedV2 is set to true if we confirm we're talking to a v2
-	// registry. This is used to limit fallbacks to the v1 protocol.
-	confirmedV2 bool
-
-	// layersPushed is the set of layers known to exist on the remote side.
-	// This avoids redundant queries when pushing multiple tags that
-	// involve the same layers.
-	layersPushed pushMap
+	// pushState is state built by the Download functions.
+	pushState pushState
 }

-type pushMap struct {
+type pushState struct {
 	sync.Mutex
-	layersPushed map[digest.Digest]bool
+	// remoteLayers is the set of layers known to exist on the remote side.
+	// This avoids redundant queries when pushing multiple tags that
+	// involve the same layers. It is also used to fill in digest and size
+	// information when building the manifest.
+	remoteLayers map[layer.DiffID]distribution.Descriptor
+	// confirmedV2 is set to true if we confirm we're talking to a v2
+	// registry. This is used to limit fallbacks to the v1 protocol.
+	confirmedV2 bool
 }

 func (p *v2Pusher) Push(ctx context.Context) (err error) {
-	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
+	p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)
+
+	p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
 	if err != nil {
 		logrus.Debugf("Error getting v2 registry: %v", err)
-		return fallbackError{err: err, confirmedV2: p.confirmedV2}
+		return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
 	}

 	if err = p.pushV2Repository(ctx); err != nil {
 		if registry.ContinueOnError(err) {
-			return fallbackError{err: err, confirmedV2: p.confirmedV2}
+			return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
 		}
 	}
 	return err
@@ -134,18 +133,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Associat
 	descriptorTemplate := v2PushDescriptor{
 		blobSumService: p.blobSumService,
 		repo:           p.repo,
-		layersPushed:   &p.layersPushed,
-		confirmedV2:    &p.confirmedV2,
-	}
-
-	// Push empty layer if necessary
-	for _, h := range img.History {
-		if h.EmptyLayer {
-			descriptor := descriptorTemplate
-			descriptor.layer = layer.EmptyLayer
-			descriptors = []xfer.UploadDescriptor{&descriptor}
-			break
-		}
+		pushState:      &p.pushState,
 	}

 	// Loop bounds condition is to avoid pushing the base layer on Windows.
@@ -157,8 +145,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Associat
 		l = l.Parent()
 	}

-	fsLayers, err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput)
-	if err != nil {
+	if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil {
 		return err
 	}

@@ -166,18 +153,22 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Associat
 	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
 		tag = tagged.Tag()
 	}
-	m, err := CreateV2Manifest(p.repo.Name(), tag, img, fsLayers)
-	if err != nil {
-		return err
+	builder := schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, p.repo.Name(), tag, img.RawJSON())
+
+	// descriptors is in reverse order; iterate backwards to get references
+	// appended in the right order.
+	for i := len(descriptors) - 1; i >= 0; i-- {
+		if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil {
+			return err
+		}
 	}

-	logrus.Infof("Signed manifest for %s using daemon's key: %s", ref.String(), p.config.TrustKey.KeyID())
-	signed, err := schema1.Sign(m, p.config.TrustKey)
+	manifest, err := builder.Build(ctx)
 	if err != nil {
 		return err
 	}

-	manifestDigest, manifestSize, err := digestFromManifest(signed, ref)
+	manifestDigest, manifestSize, err := digestFromManifest(manifest.(*schema1.SignedManifest), ref)
 	if err != nil {
 		return err
 	}
@@ -194,7 +185,12 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Associat
 	if err != nil {
 		return err
 	}
-	_, err = manSvc.Put(ctx, signed)
+
+	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
+		_, err = manSvc.Put(ctx, manifest, client.WithTag(tagged.Tag()))
+	} else {
+		_, err = manSvc.Put(ctx, manifest)
+	}
 	// FIXME create a tag
 	return err
 }
@@ -203,8 +199,7 @@ type v2PushDescriptor struct {
 	layer          layer.Layer
 	blobSumService *metadata.BlobSumService
 	repo           distribution.Repository
-	layersPushed   *pushMap
-	confirmedV2    *bool
+	pushState      *pushState
 }

 func (pd *v2PushDescriptor) Key() string {
@@ -219,25 +214,38 @@ func (pd *v2PushDescriptor) DiffID() layer.DiffID {
 	return pd.layer.DiffID()
 }

-func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
+func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
 	diffID := pd.DiffID()

-	logrus.Debugf("Pushing layer: %s", diffID)
+	pd.pushState.Lock()
+	if _, ok := pd.pushState.remoteLayers[diffID]; ok {
+		// it is already known that the push is not needed and
+		// therefore doing a stat is unnecessary
+		pd.pushState.Unlock()
+		progress.Update(progressOutput, pd.ID(), "Layer already exists")
+		return nil
+	}
+	pd.pushState.Unlock()

 	// Do we have any blobsums associated with this layer's DiffID?
 	possibleBlobsums, err := pd.blobSumService.GetBlobSums(diffID)
 	if err == nil {
-		dgst, exists, err := blobSumAlreadyExists(ctx, possibleBlobsums, pd.repo, pd.layersPushed)
+		descriptor, exists, err := blobSumAlreadyExists(ctx, possibleBlobsums, pd.repo, pd.pushState)
 		if err != nil {
 			progress.Update(progressOutput, pd.ID(), "Image push failed")
-			return "", retryOnError(err)
+			return retryOnError(err)
 		}
 		if exists {
 			progress.Update(progressOutput, pd.ID(), "Layer already exists")
-			return dgst, nil
+			pd.pushState.Lock()
+			pd.pushState.remoteLayers[diffID] = descriptor
+			pd.pushState.Unlock()
+			return nil
 		}
 	}

+	logrus.Debugf("Pushing layer: %s", diffID)
+
 	// if digest was empty or not saved, or if blob does not exist on the remote repository,
 	// then push the blob.
 	bs := pd.repo.Blobs(ctx)
@@ -245,13 +253,13 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 	// Send the layer
 	layerUpload, err := bs.Create(ctx)
 	if err != nil {
-		return "", retryOnError(err)
+		return retryOnError(err)
 	}
 	defer layerUpload.Close()

 	arch, err := pd.layer.TarStream()
 	if err != nil {
-		return "", xfer.DoNotRetry{Err: err}
+		return xfer.DoNotRetry{Err: err}
 	}

 	// don't care if this fails; best effort
@@ -267,177 +275,62 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
 	nn, err := layerUpload.ReadFrom(tee)
 	compressedReader.Close()
 	if err != nil {
-		return "", retryOnError(err)
+		return retryOnError(err)
 	}

 	pushDigest := digester.Digest()
 	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
-		return "", retryOnError(err)
+		return retryOnError(err)
 	}

-	// If Commit succeded, that's an indication that the remote registry
-	// speaks the v2 protocol.
-	*pd.confirmedV2 = true
-
 	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
 	progress.Update(progressOutput, pd.ID(), "Pushed")

 	// Cache mapping from this layer's DiffID to the blobsum
 	if err := pd.blobSumService.Add(diffID, pushDigest); err != nil {
-		return "", xfer.DoNotRetry{Err: err}
+		return xfer.DoNotRetry{Err: err}
 	}

-	pd.layersPushed.Lock()
-	pd.layersPushed.layersPushed[pushDigest] = true
-	pd.layersPushed.Unlock()
+	pd.pushState.Lock()
+
+	// If Commit succeded, that's an indication that the remote registry
+	// speaks the v2 protocol.
+	pd.pushState.confirmedV2 = true
+
+	pd.pushState.remoteLayers[diffID] = distribution.Descriptor{
+		Digest:    pushDigest,
+		MediaType: schema2.MediaTypeLayer,
+		Size:      nn,
+	}
+
+	pd.pushState.Unlock()
+
+	return nil
+}

-	return pushDigest, nil
+func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
+	// Not necessary to lock pushStatus because this is always
+	// called after all the mutation in pushStatus.
+	// By the time this function is called, every layer will have
+	// an entry in remoteLayers.
+	return pd.pushState.remoteLayers[pd.DiffID()]
 }

 // blobSumAlreadyExists checks if the registry already know about any of the
 // blobsums passed in the "blobsums" slice. If it finds one that the registry
 // knows about, it returns the known digest and "true".
-func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, layersPushed *pushMap) (digest.Digest, bool, error) {
-	layersPushed.Lock()
-	for _, dgst := range blobsums {
-		if layersPushed.layersPushed[dgst] {
-			// it is already known that the push is not needed and
-			// therefore doing a stat is unnecessary
-			layersPushed.Unlock()
-			return dgst, true, nil
-		}
-	}
-	layersPushed.Unlock()
-
+func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
 	for _, dgst := range blobsums {
-		_, err := repo.Blobs(ctx).Stat(ctx, dgst)
+		descriptor, err := repo.Blobs(ctx).Stat(ctx, dgst)
 		switch err {
 		case nil:
-			return dgst, true, nil
+			descriptor.MediaType = schema2.MediaTypeLayer
+			return descriptor, true, nil
 		case distribution.ErrBlobUnknown:
 			// nop
 		default:
-			return "", false, err
-		}
-	}
-	return "", false, nil
-}
-
-// CreateV2Manifest creates a V2 manifest from an image config and set of
-// FSLayer digests.
-// FIXME: This should be moved to the distribution repo, since it will also
-// be useful for converting new manifests to the old format.
-func CreateV2Manifest(name, tag string, img *image.Image, fsLayers map[layer.DiffID]digest.Digest) (*schema1.Manifest, error) {
-	if len(img.History) == 0 {
-		return nil, errors.New("empty history when trying to create V2 manifest")
-	}
-
-	// Generate IDs for each layer
-	// For non-top-level layers, create fake V1Compatibility strings that
-	// fit the format and don't collide with anything else, but don't
-	// result in runnable images on their own.
-	type v1Compatibility struct {
-		ID              string    `json:"id"`
-		Parent          string    `json:"parent,omitempty"`
-		Comment         string    `json:"comment,omitempty"`
-		Created         time.Time `json:"created"`
-		ContainerConfig struct {
-			Cmd []string
-		} `json:"container_config,omitempty"`
-		ThrowAway bool `json:"throwaway,omitempty"`
-	}
-
-	fsLayerList := make([]schema1.FSLayer, len(img.History))
-	history := make([]schema1.History, len(img.History))
-
-	parent := ""
-	layerCounter := 0
-	for i, h := range img.History {
-		if i == len(img.History)-1 {
-			break
-		}
-
-		var diffID layer.DiffID
-		if h.EmptyLayer {
-			diffID = layer.EmptyLayer.DiffID()
-		} else {
-			if len(img.RootFS.DiffIDs) <= layerCounter {
-				return nil, errors.New("too many non-empty layers in History section")
-			}
-			diffID = img.RootFS.DiffIDs[layerCounter]
-			layerCounter++
-		}
-
-		fsLayer, present := fsLayers[diffID]
-		if !present {
-			return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String())
-		}
-		dgst := digest.FromBytes([]byte(fsLayer.Hex() + " " + parent))
-		v1ID := dgst.Hex()
-
-		v1Compatibility := v1Compatibility{
-			ID:      v1ID,
-			Parent:  parent,
-			Comment: h.Comment,
-			Created: h.Created,
+			return distribution.Descriptor{}, false, err
 		}
-		v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
-		if h.EmptyLayer {
-			v1Compatibility.ThrowAway = true
-		}
-		jsonBytes, err := json.Marshal(&v1Compatibility)
-		if err != nil {
-			return nil, err
-		}
-
-		reversedIndex := len(img.History) - i - 1
-		history[reversedIndex].V1Compatibility = string(jsonBytes)
-		fsLayerList[reversedIndex] = schema1.FSLayer{BlobSum: fsLayer}
-
-		parent = v1ID
-	}
-
-	latestHistory := img.History[len(img.History)-1]
-
-	var diffID layer.DiffID
-	if latestHistory.EmptyLayer {
-		diffID = layer.EmptyLayer.DiffID()
-	} else {
-		if len(img.RootFS.DiffIDs) <= layerCounter {
-			return nil, errors.New("too many non-empty layers in History section")
-		}
-		diffID = img.RootFS.DiffIDs[layerCounter]
 	}
-	fsLayer, present := fsLayers[diffID]
-	if !present {
-		return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String())
-	}
-
-	fsLayerList[0] = schema1.FSLayer{BlobSum: fsLayer}
-	dgst := digest.FromBytes([]byte(fsLayer.Hex() + " " + parent + " " + string(img.RawJSON())))
-
-	// Top-level v1compatibility string should be a modified version of the
-	// image config.
-	transformedConfig, err := v1.MakeV1ConfigFromConfig(img, dgst.Hex(), parent, latestHistory.EmptyLayer)
-	if err != nil {
-		return nil, err
-	}
-
-	history[0].V1Compatibility = string(transformedConfig)
-
-	// windows-only baselayer setup
-	if err := setupBaseLayer(history, *img.RootFS); err != nil {
-		return nil, err
-	}
-
-	return &schema1.Manifest{
-		Versioned: manifest.Versioned{
-			SchemaVersion: 1,
-		},
-		Name:         name,
-		Tag:          tag,
-		Architecture: img.Architecture,
-		FSLayers:     fsLayerList,
-		History:      history,
-	}, nil
+	return distribution.Descriptor{}, false, nil
 }
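
The new Descriptor() method above exists because the manifest builder collects its layer references through an interface pair shaped roughly like the following sketch, which approximates the distribution library's Describable and ManifestBuilder and is not defined in this commit. Upload fills pushState.remoteLayers with each layer's digest, size, and media type, and Descriptor() is how AppendReference reads that information back when the manifest is assembled in pushV2Tag.

// Approximate shape of the upstream builder interfaces (an assumption for
// illustration; not part of this commit).
type describable interface {
	// Descriptor reports the digest, size, and media type of the blob this
	// object represents; v2PushDescriptor satisfies this via pushState.
	Descriptor() distribution.Descriptor
}

type manifestBuilder interface {
	// AppendReference records a dependency (here: an uploaded layer) to be
	// listed in the manifest.
	AppendReference(dependency describable) error
	// Build assembles the manifest from the appended references; the schema1
	// implementation also signs it with the daemon's trust key.
	Build(ctx context.Context) (distribution.Manifest, error)
}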

+ 0 - 176
distribution/push_v2_test.go

@@ -1,176 +0,0 @@
-package distribution
-
-import (
-	"reflect"
-	"testing"
-
-	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/manifest/schema1"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/layer"
-)
-
-func TestCreateV2Manifest(t *testing.T) {
-	imgJSON := `{
-    "architecture": "amd64",
-    "config": {
-        "AttachStderr": false,
-        "AttachStdin": false,
-        "AttachStdout": false,
-        "Cmd": [
-            "/bin/sh",
-            "-c",
-            "echo hi"
-        ],
-        "Domainname": "",
-        "Entrypoint": null,
-        "Env": [
-            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-            "derived=true",
-            "asdf=true"
-        ],
-        "Hostname": "23304fc829f9",
-        "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
-        "Labels": {},
-        "OnBuild": [],
-        "OpenStdin": false,
-        "StdinOnce": false,
-        "Tty": false,
-        "User": "",
-        "Volumes": null,
-        "WorkingDir": ""
-    },
-    "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001",
-    "container_config": {
-        "AttachStderr": false,
-        "AttachStdin": false,
-        "AttachStdout": false,
-        "Cmd": [
-            "/bin/sh",
-            "-c",
-            "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"
-        ],
-        "Domainname": "",
-        "Entrypoint": null,
-        "Env": [
-            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-            "derived=true",
-            "asdf=true"
-        ],
-        "Hostname": "23304fc829f9",
-        "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
-        "Labels": {},
-        "OnBuild": [],
-        "OpenStdin": false,
-        "StdinOnce": false,
-        "Tty": false,
-        "User": "",
-        "Volumes": null,
-        "WorkingDir": ""
-    },
-    "created": "2015-11-04T23:06:32.365666163Z",
-    "docker_version": "1.9.0-dev",
-    "history": [
-        {
-            "created": "2015-10-31T22:22:54.690851953Z",
-            "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
-        },
-        {
-            "created": "2015-10-31T22:22:55.613815829Z",
-            "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]"
-        },
-        {
-            "created": "2015-11-04T23:06:30.934316144Z",
-            "created_by": "/bin/sh -c #(nop) ENV derived=true",
-            "empty_layer": true
-        },
-        {
-            "created": "2015-11-04T23:06:31.192097572Z",
-            "created_by": "/bin/sh -c #(nop) ENV asdf=true",
-            "empty_layer": true
-        },
-        {
-            "created": "2015-11-04T23:06:32.083868454Z",
-            "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"
-        },
-        {
-            "created": "2015-11-04T23:06:32.365666163Z",
-            "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]",
-            "empty_layer": true
-        }
-    ],
-    "os": "linux",
-    "rootfs": {
-        "diff_ids": [
-            "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
-            "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
-            "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
-        ],
-        "type": "layers"
-    }
-}`
-
-	// To fill in rawJSON
-	img, err := image.NewFromJSON([]byte(imgJSON))
-	if err != nil {
-		t.Fatalf("json decoding failed: %v", err)
-	}
-
-	fsLayers := map[layer.DiffID]digest.Digest{
-		layer.DiffID("sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1"): digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
-		layer.DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"): digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
-		layer.DiffID("sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"): digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
-	}
-
-	manifest, err := CreateV2Manifest("testrepo", "testtag", img, fsLayers)
-	if err != nil {
-		t.Fatalf("CreateV2Manifest returned error: %v", err)
-	}
-
-	if manifest.Versioned.SchemaVersion != 1 {
-		t.Fatal("SchemaVersion != 1")
-	}
-	if manifest.Name != "testrepo" {
-		t.Fatal("incorrect name in manifest")
-	}
-	if manifest.Tag != "testtag" {
-		t.Fatal("incorrect tag in manifest")
-	}
-	if manifest.Architecture != "amd64" {
-		t.Fatal("incorrect arch in manifest")
-	}
-
-	expectedFSLayers := []schema1.FSLayer{
-		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
-		{BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
-		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
-		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
-		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
-		{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
-	}
-
-	if len(manifest.FSLayers) != len(expectedFSLayers) {
-		t.Fatalf("wrong number of FSLayers: %d", len(manifest.FSLayers))
-	}
-	if !reflect.DeepEqual(manifest.FSLayers, expectedFSLayers) {
-		t.Fatal("wrong FSLayers list")
-	}
-
-	expectedV1Compatibility := []string{
-		`{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"d728140d3fd23dfcac505954af0b2224b3579b177029eded62916579eb19ac64","os":"linux","parent":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","throwaway":true}`,
-		`{"id":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","parent":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]}}`,
-		`{"id":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","parent":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`,
-		`{"id":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV derived=true"]},"throwaway":true}`,
-		`{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`,
-		`{"id":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:54.690851953Z","container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"]}}`,
-	}
-
-	if len(manifest.History) != len(expectedV1Compatibility) {
-		t.Fatalf("wrong number of history entries: %d", len(manifest.History))
-	}
-	for i := range expectedV1Compatibility {
-		if manifest.History[i].V1Compatibility != expectedV1Compatibility[i] {
-			t.Fatalf("wrong V1Compatibility %d. expected:\n%s\ngot:\n%s", i, expectedV1Compatibility[i], manifest.History[i].V1Compatibility)
-		}
-	}
-}

+ 0 - 12
distribution/push_v2_unix.go

@@ -1,12 +0,0 @@
-// +build !windows
-
-package distribution
-
-import (
-	"github.com/docker/distribution/manifest/schema1"
-	"github.com/docker/docker/image"
-)
-
-func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error {
-	return nil
-}

+ 0 - 28
distribution/push_v2_windows.go

@@ -1,28 +0,0 @@
-// +build windows
-
-package distribution
-
-import (
-	"encoding/json"
-
-	"github.com/docker/distribution/manifest/schema1"
-	"github.com/docker/docker/image"
-)
-
-func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error {
-	var v1Config map[string]*json.RawMessage
-	if err := json.Unmarshal([]byte(history[len(history)-1].V1Compatibility), &v1Config); err != nil {
-		return err
-	}
-	baseID, err := json.Marshal(rootFS.BaseLayerID())
-	if err != nil {
-		return err
-	}
-	v1Config["parent"] = (*json.RawMessage)(&baseID)
-	configJSON, err := json.Marshal(v1Config)
-	if err != nil {
-		return err
-	}
-	history[len(history)-1].V1Compatibility = string(configJSON)
-	return nil
-}

+ 6 - 11
distribution/xfer/upload.go

@@ -5,7 +5,6 @@ import (
 	"time"

 	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/progress"
 	"golang.org/x/net/context"
@@ -30,7 +29,6 @@ type uploadTransfer struct {
 	Transfer

 	diffID layer.DiffID
-	digest digest.Digest
 	err    error
 }

@@ -43,16 +41,15 @@ type UploadDescriptor interface {
 	// DiffID should return the DiffID for this layer.
 	DiffID() layer.DiffID
 	// Upload is called to perform the Upload.
-	Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error)
+	Upload(ctx context.Context, progressOutput progress.Output) error
 }

 // Upload is a blocking function which ensures the listed layers are present on
 // the remote registry. It uses the string returned by the Key method to
 // deduplicate uploads.
-func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) (map[layer.DiffID]digest.Digest, error) {
+func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error {
 	var (
 		uploads          []*uploadTransfer
-		digests          = make(map[layer.DiffID]digest.Digest)
 		dedupDescriptors = make(map[string]struct{})
 	)

@@ -74,16 +71,15 @@ func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescri
 	for _, upload := range uploads {
 		select {
 		case <-ctx.Done():
-			return nil, ctx.Err()
+			return ctx.Err()
 		case <-upload.Transfer.Done():
 			if upload.err != nil {
-				return nil, upload.err
+				return upload.err
 			}
-			digests[upload.diffID] = upload.digest
 		}
 	}

-	return digests, nil
+	return nil
 }

 func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc {
@@ -109,9 +105,8 @@ func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFun

 			retries := 0
 			for {
-				digest, err := descriptor.Upload(u.Transfer.Context(), progressOutput)
+				err := descriptor.Upload(u.Transfer.Context(), progressOutput)
 				if err == nil {
-					u.digest = digest
 					break
 				}


+ 7 - 22
distribution/xfer/upload_test.go

@@ -36,12 +36,12 @@ func (u *mockUploadDescriptor) DiffID() layer.DiffID {
 }

 // Upload is called to perform the upload.
-func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
+func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
 	if u.currentUploads != nil {
 		defer atomic.AddInt32(u.currentUploads, -1)

 		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
-			return "", errors.New("concurrency limit exceeded")
+			return errors.New("concurrency limit exceeded")
 		}
 	}

@@ -49,7 +49,7 @@ func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progre
 	for i := int64(0); i <= 10; i++ {
 		select {
 		case <-ctx.Done():
-			return "", ctx.Err()
+			return ctx.Err()
 		case <-time.After(10 * time.Millisecond):
 			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
 		}
@@ -57,12 +57,10 @@ func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progre

 	if u.simulateRetries != 0 {
 		u.simulateRetries--
-		return "", errors.New("simulating retry")
+		return errors.New("simulating retry")
 	}

-	// For the mock implementation, use SHA256(DiffID) as the returned
-	// digest.
-	return digest.FromBytes([]byte(u.diffID.String())), nil
+	return nil
 }

 func uploadDescriptors(currentUploads *int32) []UploadDescriptor {
@@ -101,26 +99,13 @@ func TestSuccessfulUpload(t *testing.T) {
 	var currentUploads int32
 	descriptors := uploadDescriptors(&currentUploads)

-	digests, err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
+	err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
 	if err != nil {
 		t.Fatalf("upload error: %v", err)
 	}

 	close(progressChan)
 	<-progressDone
-
-	if len(digests) != len(expectedDigests) {
-		t.Fatal("wrong number of keys in digests map")
-	}
-
-	for key, val := range expectedDigests {
-		if digests[key] != val {
-			t.Fatalf("mismatch in digest array for key %v (expected %v, got %v)", key, val, digests[key])
-		}
-		if receivedProgress[key.String()] != 10 {
-			t.Fatalf("missing or wrong progress output for %v", key)
-		}
-	}
 }

 func TestCancelledUpload(t *testing.T) {
@@ -143,7 +128,7 @@ func TestCancelledUpload(t *testing.T) {
 	}()

 	descriptors := uploadDescriptors(nil)
-	_, err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
+	err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
 	if err != context.Canceled {
 		t.Fatal("expected upload to be cancelled")
 	}