Bläddra i källkod

Merge pull request #14831 from duglin/VendorDist

Vendor latest distributions so we can use the new errcode stuff
Stephen Day 10 år sedan
förälder
incheckning
4af94efcf0

+ 15 - 3
graph/pull_v2.go

@@ -18,6 +18,7 @@ import (
 	"github.com/docker/docker/trust"
 	"github.com/docker/docker/utils"
 	"github.com/docker/libtrust"
+	"golang.org/x/net/context"
 )
 
 type v2Puller struct {
@@ -58,7 +59,13 @@ func (p *v2Puller) pullV2Repository(tag string) (err error) {
 		taggedName = utils.ImageReference(p.repoInfo.LocalName, tag)
 	} else {
 		var err error
-		tags, err = p.repo.Manifests().Tags()
+
+		manSvc, err := p.repo.Manifests(context.Background())
+		if err != nil {
+			return err
+		}
+
+		tags, err = manSvc.Tags()
 		if err != nil {
 			return err
 		}
@@ -140,7 +147,7 @@ func (p *v2Puller) download(di *downloadInfo) {
 		di.err <- err
 		return
 	}
-	di.size = desc.Length
+	di.size = desc.Size
 
 	layerDownload, err := blobs.Open(nil, di.digest)
 	if err != nil {
@@ -187,7 +194,12 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) {
 	logrus.Debugf("Pulling tag from V2 registry: %q", tag)
 	out := p.config.OutStream
 
-	manifest, err := p.repo.Manifests().GetByTag(tag)
+	manSvc, err := p.repo.Manifests(context.Background())
+	if err != nil {
+		return false, err
+	}
+
+	manifest, err := manSvc.GetByTag(tag)
 	if err != nil {
 		return false, err
 	}

+ 6 - 1
graph/push_v2.go

@@ -16,6 +16,7 @@ import (
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
+	"golang.org/x/net/context"
 )
 
 type v2Pusher struct {
@@ -191,7 +192,11 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
 		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
 	}
 
-	return p.repo.Manifests().Put(signed)
+	manSvc, err := p.repo.Manifests(context.Background())
+	if err != nil {
+		return err
+	}
+	return manSvc.Put(signed)
 }
 
 func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (digest.Digest, error) {

+ 1 - 1
hack/vendor.sh

@@ -35,7 +35,7 @@ clone git github.com/coreos/go-etcd v2.0.0
 clone git github.com/hashicorp/consul v0.5.2
 
 # get graph and distribution packages
-clone git github.com/docker/distribution 419bbc2da637d9b2a812be78ef8436df7caac70d
+clone git github.com/docker/distribution cd8ff553b6b1911be23dfeabb73e33108bcbf147
 clone git github.com/vbatts/tar-split v0.9.4
 
 clone git github.com/opencontainers/runc v0.0.2 # libcontainer

+ 47 - 0
vendor/src/github.com/docker/distribution/CONTRIBUTING.md

@@ -90,3 +90,50 @@ It's mandatory to:
 Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.
 
 Have a look at a great, successful contribution: the [Ceph driver PR](https://github.com/docker/distribution/pull/443)
+
+## Coding Style
+
+Unless explicitly stated, we follow all coding guidelines from the Go
+community. While some of these standards may seem arbitrary, they somehow seem
+to result in a solid, consistent codebase.
+
+It is possible that the code base does not currently comply with these
+guidelines. We are not looking for a massive PR that fixes this, since that
+goes against the spirit of the guidelines. All new contributions should make a
+best effort to clean up and make the code base better than they left it.
+Obviously, apply your best judgement. Remember, the goal here is to make the
+code base easier for humans to navigate and understand. Always keep that in
+mind when nudging others to comply.
+
+The rules:
+
+1. All code should be formatted with `gofmt -s`.
+2. All code should pass the default levels of
+   [`golint`](https://github.com/golang/lint).
+3. All code should follow the guidelines covered in [Effective
+   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
+   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
+4. Comment the code. Tell us the why, the history and the context.
+5. Document _all_ declarations and methods, even private ones. Declare
+   expectations, caveats and anything else that may be important. If a type
+   gets exported, having the comments already there will ensure it's ready.
+6. Variable name length should be proportional to its context and no longer.
+   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
+   In practice, short methods will have short variable names and globals will
+   have longer names.
+7. No underscores in package names. If you need a compound name, step back,
+   and re-examine why you need a compound name. If you still think you need a
+   compound name, lose the underscore.
+8. No utils or helpers packages. If a function is not general enough to
+   warrant its own package, it has not been written generally enough to be a
+   part of a util package. Just leave it unexported and well-documented.
+9. All tests should run with `go test` and outside tooling should not be
+   required. No, we don't need another unit testing framework. Assertion
+   packages are acceptable if they provide _real_ incremental value.
+10. Even though we call these "rules" above, they are actually just
+    guidelines. Since you've read all the rules, you now know that.
+
+If you are having trouble getting into the mood of idiomatic Go, we recommend
+reading through [Effective Go](http://golang.org/doc/effective_go.html). The
+[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
+kool-aid is a lot easier than going thirsty.

+ 2 - 0
vendor/src/github.com/docker/distribution/README.md

@@ -7,6 +7,8 @@ for storing and distributing Docker images. It supersedes the [docker/docker-
 registry](https://github.com/docker/docker-registry) project with a new API
 design, focused around security and performance.
 
+<img src="https://www.docker.com/sites/default/files/oyster-registry-3.png" width=200px/>
+
 This repository contains the following components:
 
 |**Component**       |Description                                                                                                                                                                                         |

+ 207 - 26
vendor/src/github.com/docker/distribution/ROADMAP.md

@@ -1,11 +1,17 @@
 # Roadmap
 
-The Distribution Project consists of several components, some of which are still being defined. This document defines the high-level goals of the project, identifies the current components, and defines the release-relationship to the Docker Platform.
+The Distribution Project consists of several components, some of which are
+still being defined. This document defines the high-level goals of the
+project, identifies the current components, and defines the release-
+relationship to the Docker Platform.
 
 * [Distribution Goals](#distribution-goals)
 * [Distribution Components](#distribution-components)
 * [Project Planning](#project-planning): release-relationship to the Docker Platform.
 
+This road map is a living document, providing an overview of the goals and
+considerations made in respect of the future of the project.
+
 ## Distribution Goals
 
 - Replace the existing [docker registry](github.com/docker/docker-registry)
@@ -30,41 +36,216 @@ implementation.
 
 ### Registry
 
-Registry 2.0 is the first release of the next-generation registry. This is primarily
-focused on implementing the [new registry
-API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), with
-a focus on security and performance.
+The new Docker registry is the main portion of the distribution repository.
+Registry 2.0 is the first release of the next-generation registry. This was
+primarily focused on implementing the [new registry
+API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
+with a focus on security and performance. 
 
-#### Registry 2.0
+Following from the Distribution project goals above, we have a set of goals
+for registry v2 that we would like to follow in the design. New features
+should be compared against these goals.
 
-Features:
+#### Data Storage and Distribution First
 
-- Faster push and pull
-- New, more efficient implementation
-- Simplified deployment
-- Full API specification for V2 protocol
-- Pluggable storage system (s3, azure, filesystem and inmemory supported)
-- Immutable manifest references ([#46](https://github.com/docker/distribution/issues/46))
-- Webhook notification system ([#42](https://github.com/docker/distribution/issues/42))
-- Native TLS Support ([#132](https://github.com/docker/distribution/pull/132))
-- Pluggable authentication system
-- Health Checks ([#230](https://github.com/docker/distribution/pull/230))
+The registry's first goal is to provide a reliable, consistent storage
+location for Docker images. The registry should only provide the minimal
+amount of indexing required to fetch image data and no more.
 
-#### Registry 2.1
+This means we should be selective in new features and API additions, including
+those that may require expensive, ever growing indexes. Requests should be
+servable in "constant time".
 
-Planned Features:
+#### Content Addressability
 
-> **NOTE:** This feature list is incomplete at this time.
+All data objects used in the registry API should be content addressable.
+Content identifiers should be secure and verifiable. This provides a secure,
+reliable base from which to build more advanced content distribution systems.
 
-- Support for Manifest V2, Schema 2 and explicit tagging objects ([#62](https://github.com/docker/distribution/issues/62), [#173](https://github.com/docker/distribution/issues/173))
-- Mirroring ([#19](https://github.com/docker/distribution/issues/19))
-- Flexible client package based on distribution interfaces ([#193](https://github.com/docker/distribution/issues/193)
+#### Content Agnostic
 
-#### Registry 2.2
+In the past, changes to the image format would require large changes in Docker
+and the Registry. By decoupling the distribution and image format, we can
+allow the formats to progress without having to coordinate between the two.
+This means that we should be focused on decoupling Docker from the registry
+just as much as decoupling the registry from Docker. Such an approach will
+allow us to unlock new distribution models that haven't been possible before.
 
-TBD
+We can take this further by saying that the new registry should be content
+agnostic. The registry provides a model of names, tags, manifests and content
+addresses and that model can be used to work with content.
 
-***
+#### Simplicity
+
+The new registry should be closer to a microservice component than its
+predecessor. This means it should have a narrower API and a low number of
+service dependencies. It should be easy to deploy.
+
+This means that other solutions should be explored before changing the API or
+adding extra dependencies. If functionality is required, can it be added as an
+extension or companion service?
+
+#### Extensibility
+
+The registry should provide extension points to add functionality while
+keeping the scope narrow.
+
+Features like search, indexing, synchronization and registry explorers fall
+into this category. No such feature should be added unless we've found it
+impossible to do through an extension.
+
+#### Active Feature Discussions
+
+The following are feature discussions that are currently active.
+
+If you don't see your favorite, unimplemented feature, feel free to contact us
+via IRC or the mailing list and we can talk about adding it. The goal here is
+to make sure that new features go through a rigid design process before
+landing in the registry.
+
+##### Mirroring and Pull-through Caching
+
+Mirroring and pull-through caching are related but slightly different. We've
+adopted the term _mirroring_ to be a proper mirror of a registry, meaning it
+has all the content the upstream would have. Providing such mirrors in the
+Docker ecosystem is dependent on a solid trust system, which is still in the
+works.
+
+The more commonly helpful feature is _pull-through caching_, where data is
+fetched from an upstream when not available in a local registry instance.
+
+Please see the following issues:
+
+- https://github.com/docker/distribution/issues/459
+
+##### Peer to Peer transfer
+
+Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
+
+##### Indexing, Search and Discovery
+
+The original registry provided some implementation of search for use with
+private registries. Support has been elided from V2 since we'd like to
+decouple search functionality from the registry. This makes the registry
+simpler to deploy, especially in use cases where search is not needed, and
+lets us decouple the image format from the registry.
+
+There are explorations into using the catalog API and notification system to
+build external indexes. The current line of thought is that we will define a
+common search API to index and query docker images. Such a system could be run
+as a companion to a registry or set of registries to power discovery.
+
+The main issue with search and discovery is that there are so many ways to
+accomplish it. There are two aspects to this project. The first is deciding on
+how it will be done, including an API definition that can work with changing
+data formats. The second is the process of integrating with `docker search`.
+We expect that someone attempts to address the problem with the existing tools
+and propose it as a standard search API or uses it to inform a standardization
+process. Once this has been explored, we integrate with the docker client.
+
+Please see the following for more detail:
+
+- https://github.com/docker/distribution/issues/206
+
+##### Deletes
+
+> __NOTE:__ Deletes are a much asked for feature. Before requesting this
+feature or participating in discussion, we ask that you read this section in
+full and understand the problems behind deletes.
+
+While, at first glance, implementing deletes seems simple, there are a number
+of mitigating factors that make many solutions not ideal or even pathological
+in the context of a registry. The following paragraphs discuss the background
+and approaches that could be applied to arrive at a solution.
+
+The goal of deletes in any system is to remove unused or unneeded data. Only
+data requested for deletion should be removed and no other data. Removing
+unintended data is worse than _not_ removing data that was requested for
+removal but ideally, both are supported. Generally, according to this rule, we
+err on holding data longer than needed, ensuring that it is only removed when
+we can be certain that it can be removed. With the current behavior, we opt to
+hold onto the data forever, ensuring that data cannot be incorrectly removed.
+
+To understand the problems with implementing deletes, one must understand the
+data model. All registry data is stored in a filesystem layout, implemented on
+a "storage driver", effectively a _virtual file system_ (VFS). The storage
+system must assume that this VFS layer will be eventually consistent and has
+poor read-after-write consistency, since this is the lowest common denominator
+among the storage drivers. This is mitigated by writing values in reverse-
+dependent order, but makes wider transactional operations unsafe.
+
+Layered on the VFS model is a content-addressable _directed, acyclic graph_
+(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
+Since the same data can be referenced by multiple manifests, we only store
+data once, even if it is in different repositories. Thus, we have a set of
+blobs, referenced by tags and manifests. If we want to delete a blob we need
+to be certain that it is no longer referenced by another manifest or tag. When
+we delete a manifest, we also can try to delete the referenced blobs. Deciding
+whether or not a blob has an active reference is the crux of the problem.
+
+Conceptually, deleting a manifest and its resources is quite simple. Just find
+all the manifests, enumerate the referenced blobs and delete the blobs not in
+that set. An astute observer will recognize this as a garbage collection
+problem. As with garbage collection in programming languages, this is very
+simple when one always has a consistent view. When one adds parallelism and an
+inconsistent view of data, it becomes very challenging.
+
+A simple example can demonstrate this. Let's say we are deleting a manifest
+_A_ in one process. We scan the manifest and decide that all the blobs are
+ready for deletion. Concurrently, we have another process accepting a new
+manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
+is accepted and all the blobs are considered present, so the operation
+proceeds. The original process then deletes the referenced blobs, assuming
+they were unreferenced. The manifest _B_, which we thought had all of its data
+present, can no longer be served by the registry, since the dependent data has
+been deleted.
+
+Deleting data from the registry safely requires some way to coordinate this
+operation. The following approaches are being considered:
+
+- _Reference Counting_ - Maintain a count of references to each blob. This is
+  challenging for a number of reasons: 1. maintaining a consistent consensus
+  of reference counts across a set of Registries and 2. Building the initial
+  list of reference counts for an existing registry. These challenges can be
+  met with a consensus protocol like Paxos or Raft in the first case and a
+  necessary but simple scan in the second.
+- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
+  and find all blob references. Delete all unreferenced blobs. This approach
+  is very simple but requires disabling writes for a period of time while the
+  service reads all data. This is slow and expensive but very accurate and
+  effective.
+- _Generational GC_ - Do something similar to above but instead of blocking
+  writes, writes are sent to another storage backend while reads are broadcast
+  to the new and old backends. GC is then performed on the read-only portion.
+  Because writes land in the new backend, the data in the read-only section
+  can be safely deleted. The main drawbacks of this approach are complexity
+  and coordination.
+- _Centralized Oracle_ - Using a centralized, transactional database, we can
+  know exactly which data is referenced at any given time. This avoids the
+  coordination problem by managing this data in a single location. We trade
+  off metadata scalability for simplicity and performance. This is a very good
+  option for most registry deployments. This would create a bottleneck for
+  registry metadata. However, metadata is generally not the main bottleneck
+  when serving images.
+
+Please let us know if other solutions exist that we have yet to enumerate.
+Note that for any approach, implementation is a massive consideration. For
+example, a mark-sweep based solution may seem simple but the amount of work in
+coordination offset the extra work it might take to build a _Centralized
+Oracle_. We'll accept proposals for any solution but please coordinate with us
+before dropping code.
+
+At this time, we have traded off simplicity and ease of deployment for disk
+space. Simplicity and ease of deployment tend to reduce developer involvement,
+which is currently the most expensive resource in software engineering. Taking
+on any solution for deletes will greatly affect these factors, trading off
+very cheap disk space for a complex deployment and operational story.
+
+Please see the following issues for more detail:
+
+- https://github.com/docker/distribution/issues/422
+- https://github.com/docker/distribution/issues/461
+- https://github.com/docker/distribution/issues/462
 
 ### Distribution Package 
 

+ 2 - 2
vendor/src/github.com/docker/distribution/blobs.go

@@ -49,8 +49,8 @@ type Descriptor struct {
 	// encoded as utf-8.
 	MediaType string `json:"mediaType,omitempty"`
 
-	// Length in bytes of content.
-	Length int64 `json:"length,omitempty"`
+	// Size in bytes of content.
+	Size int64 `json:"size,omitempty"`
 
 	// Digest uniquely identifies the content. A byte stream can be verified
 	// against this digest.

+ 12 - 2
vendor/src/github.com/docker/distribution/registry.go

@@ -35,15 +35,25 @@ type Namespace interface {
 	// registry may or may not have the repository but should always return a
 	// reference.
 	Repository(ctx context.Context, name string) (Repository, error)
+
+	// Repositories fills 'repos' with a lexigraphically sorted catalog of repositories
+	// up to the size of 'repos' and returns the value 'n' for the number of entries
+	// which were filled.  'last' contains an offset in the catalog, and 'err' will be
+	// set to io.EOF if there are no more entries to obtain.
+	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
 }
 
+// ManifestServiceOption is a function argument for Manifest Service methods
+type ManifestServiceOption func(ManifestService) error
+
 // Repository is a named collection of manifests and layers.
 type Repository interface {
 	// Name returns the name of the repository.
 	Name() string
 
 	// Manifests returns a reference to this repository's manifest service.
-	Manifests() ManifestService
+	// with the supplied options applied.
+	Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
 
 	// Blobs returns a reference to this repository's blob service.
 	Blobs(ctx context.Context) BlobStore
@@ -84,7 +94,7 @@ type ManifestService interface {
 	ExistsByTag(tag string) (bool, error)
 
 	// GetByTag retrieves the named manifest, if it exists.
-	GetByTag(tag string) (*manifest.SignedManifest, error)
+	GetByTag(tag string, options ...ManifestServiceOption) (*manifest.SignedManifest, error)
 
 	// TODO(stevvooe): There are several changes that need to be done to this
 	// interface:

+ 58 - 28
vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go

@@ -16,6 +16,8 @@ type ErrorCoder interface {
 // and the integer format may change and should *never* be exported.
 type ErrorCode int
 
+var _ error = ErrorCode(0)
+
 // ErrorCode just returns itself
 func (ec ErrorCode) ErrorCode() ErrorCode {
 	return ec
@@ -69,22 +71,32 @@ func (ec *ErrorCode) UnmarshalText(text []byte) error {
 // WithDetail creates a new Error struct based on the passed-in info and
 // set the Detail property appropriately
 func (ec ErrorCode) WithDetail(detail interface{}) Error {
-	if err, ok := detail.(error); ok {
-		detail = err.Error()
-	}
+	return Error{
+		Code:    ec,
+		Message: ec.Message(),
+	}.WithDetail(detail)
+}
 
+// WithArgs creates a new Error struct and sets the Args slice
+func (ec ErrorCode) WithArgs(args ...interface{}) Error {
 	return Error{
-		Code:   ec,
-		Detail: detail,
-	}
+		Code:    ec,
+		Message: ec.Message(),
+	}.WithArgs(args...)
 }
 
 // Error provides a wrapper around ErrorCode with extra Details provided.
 type Error struct {
-	Code   ErrorCode   `json:"code"`
-	Detail interface{} `json:"detail,omitempty"`
+	Code    ErrorCode   `json:"code"`
+	Message string      `json:"message"`
+	Detail  interface{} `json:"detail,omitempty"`
+
+	// TODO(duglin): See if we need an "args" property so we can do the
+	// variable substitution right before showing the message to the user
 }
 
+var _ error = Error{}
+
 // ErrorCode returns the ID/Value of this Error
 func (e Error) ErrorCode() ErrorCode {
 	return e.Code
@@ -94,12 +106,27 @@ func (e Error) ErrorCode() ErrorCode {
 func (e Error) Error() string {
 	return fmt.Sprintf("%s: %s",
 		strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)),
-		e.Code.Message())
+		e.Message)
+}
+
+// WithDetail will return a new Error, based on the current one, but with
+// some Detail info added
+func (e Error) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: e.Message,
+		Detail:  detail,
+	}
 }
 
-// Message returned the human-readable error message for this Error
-func (e Error) Message() string {
-	return e.Code.Message()
+// WithArgs uses the passed-in list of interface{} as the substitution
+// variables in the Error's Message string, but returns a new Error
+func (e Error) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: fmt.Sprintf(e.Code.Message(), args...),
+		Detail:  e.Detail,
+	}
 }
 
 // ErrorDescriptor provides relevant information about a given error code.
@@ -140,6 +167,8 @@ func ParseErrorCode(value string) ErrorCode {
 // for use within the application.
 type Errors []error
 
+var _ error = Errors{}
+
 func (errs Errors) Error() string {
 	switch len(errs) {
 	case 0:
@@ -160,20 +189,11 @@ func (errs Errors) Len() int {
 	return len(errs)
 }
 
-// jsonError extends Error with 'Message' so that we can include the
-// error text, just in case the receiver of the JSON doesn't have this
-// particular ErrorCode registered
-type jsonError struct {
-	Code    ErrorCode   `json:"code"`
-	Message string      `json:"message"`
-	Detail  interface{} `json:"detail,omitempty"`
-}
-
 // MarshalJSON converts slice of error, ErrorCode or Error into a
 // slice of Error - then serializes
 func (errs Errors) MarshalJSON() ([]byte, error) {
 	var tmpErrs struct {
-		Errors []jsonError `json:"errors,omitempty"`
+		Errors []Error `json:"errors,omitempty"`
 	}
 
 	for _, daErr := range errs {
@@ -189,9 +209,16 @@ func (errs Errors) MarshalJSON() ([]byte, error) {
 
 		}
 
-		tmpErrs.Errors = append(tmpErrs.Errors, jsonError{
+		// If the Error struct was setup and they forgot to set the
+		// Message field (meaning its "") then grab it from the ErrCode
+		msg := err.Message
+		if msg == "" {
+			msg = err.Code.Message()
+		}
+
+		tmpErrs.Errors = append(tmpErrs.Errors, Error{
 			Code:    err.Code,
-			Message: err.Message(),
+			Message: msg,
 			Detail:  err.Detail,
 		})
 	}
@@ -203,7 +230,7 @@ func (errs Errors) MarshalJSON() ([]byte, error) {
 // Error or ErrorCode
 func (errs *Errors) UnmarshalJSON(data []byte) error {
 	var tmpErrs struct {
-		Errors []jsonError
+		Errors []Error
 	}
 
 	if err := json.Unmarshal(data, &tmpErrs); err != nil {
@@ -212,14 +239,17 @@ func (errs *Errors) UnmarshalJSON(data []byte) error {
 
 	var newErrs Errors
 	for _, daErr := range tmpErrs.Errors {
-		if daErr.Detail == nil {
+		// If Message is empty or exactly matches the Code's message string
+		// then just use the Code, no need for a full Error struct
+		if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
 			// Error's w/o details get converted to ErrorCode
 			newErrs = append(newErrs, daErr.Code)
 		} else {
 			// Error's w/ details are untouched
 			newErrs = append(newErrs, Error{
-				Code:   daErr.Code,
-				Detail: daErr.Detail,
+				Code:    daErr.Code,
+				Message: daErr.Message,
+				Detail:  daErr.Detail,
 			})
 		}
 	}

+ 44 - 0
vendor/src/github.com/docker/distribution/registry/api/errcode/handler.go

@@ -0,0 +1,44 @@
+package errcode
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
+// and sets the content-type header to 'application/json'. It will handle
+// ErrorCoder and Errors, and if necessary will create an envelope.
+func ServeJSON(w http.ResponseWriter, err error) error {
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	var sc int
+
+	switch errs := err.(type) {
+	case Errors:
+		if len(errs) < 1 {
+			break
+		}
+
+		if err, ok := errs[0].(ErrorCoder); ok {
+			sc = err.ErrorCode().Descriptor().HTTPStatusCode
+		}
+	case ErrorCoder:
+		sc = errs.ErrorCode().Descriptor().HTTPStatusCode
+		err = Errors{err} // create an envelope.
+	default:
+		// We just have an unhandled error type, so just place in an envelope
+		// and move along.
+		err = Errors{err}
+	}
+
+	if sc == 0 {
+		sc = http.StatusInternalServerError
+	}
+
+	w.WriteHeader(sc)
+
+	if err := json.NewEncoder(w).Encode(err); err != nil {
+		return err
+	}
+
+	return nil
+}

+ 127 - 0
vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go

@@ -87,6 +87,30 @@ var (
 		Format:      "<digest>",
 	}
 
+	linkHeader = ParameterDescriptor{
+		Name:        "Link",
+		Type:        "link",
+		Description: "RFC5988 compliant rel='next' with URL to next result set, if available",
+		Format:      `<<url>?n=<last n value>&last=<last entry from response>>; rel="next"`,
+	}
+
+	paginationParameters = []ParameterDescriptor{
+		{
+			Name:        "n",
+			Type:        "integer",
+			Description: "Limit the number of entries in each response. If not present, all entries will be returned.",
+			Format:      "<integer>",
+			Required:    false,
+		},
+		{
+			Name:        "last",
+			Type:        "string",
+			Description: "Result set will include values lexically after last.",
+			Format:      "<integer>",
+			Required:    false,
+		},
+	}
+
 	unauthorizedResponse = ResponseDescriptor{
 		Description: "The client does not have access to the repository.",
 		StatusCode:  http.StatusUnauthorized,
@@ -269,6 +293,9 @@ type ResponseDescriptor struct {
 	// Headers covers any headers that may be returned from the response.
 	Headers []ParameterDescriptor
 
+	// Fields describes any fields that may be present in the response.
+	Fields []ParameterDescriptor
+
 	// ErrorCodes enumerates the error codes that may be returned along with
 	// the response.
 	ErrorCodes []errcode.ErrorCode
@@ -427,6 +454,36 @@ var routeDescriptors = []RouteDescriptor{
 							},
 						},
 					},
+					{
+						Description:     "Return a portion of the tags for the specified repository.",
+						PathParameters:  []ParameterDescriptor{nameParameterDescriptor},
+						QueryParameters: paginationParameters,
+						Successes: []ResponseDescriptor{
+							{
+								StatusCode:  http.StatusOK,
+								Description: "A list of tags for the named repository.",
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "Length of the JSON response body.",
+										Format:      "<length>",
+									},
+									linkHeader,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+    "name": <name>,
+    "tags": [
+        <tag>,
+        ...
+    ],
+}`,
+								},
+							},
+						},
+					},
 				},
 			},
 		},
@@ -1320,6 +1377,76 @@ var routeDescriptors = []RouteDescriptor{
 			},
 		},
 	},
+	{
+		Name:        RouteNameCatalog,
+		Path:        "/v2/_catalog",
+		Entity:      "Catalog",
+		Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Retrieve a sorted, json list of repositories available in the registry.",
+				Requests: []RequestDescriptor{
+					{
+						Name:        "Catalog Fetch Complete",
+						Description: "Request an unabridged list of repositories available.",
+						Successes: []ResponseDescriptor{
+							{
+								Description: "Returns the unabridged list of repositories as a json response.",
+								StatusCode:  http.StatusOK,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "Length of the JSON response body.",
+										Format:      "<length>",
+									},
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+	"repositories": [
+		<name>,
+		...
+	]
+}`,
+								},
+							},
+						},
+					},
+					{
+						Name:            "Catalog Fetch Paginated",
+						Description:     "Return the specified portion of repositories.",
+						QueryParameters: paginationParameters,
+						Successes: []ResponseDescriptor{
+							{
+								StatusCode: http.StatusOK,
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+	"repositories": [
+		<name>,
+		...
+	],
+	"next": "<url>?last=<name>&n=<last value of n>"
+}`,
+								},
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "Length of the JSON response body.",
+										Format:      "<length>",
+									},
+									linkHeader,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	},
 }
 
 var routeDescriptorsMap map[string]RouteDescriptor

+ 2 - 0
vendor/src/github.com/docker/distribution/registry/api/v2/routes.go

@@ -11,10 +11,12 @@ const (
 	RouteNameBlob            = "blob"
 	RouteNameBlobUpload      = "blob-upload"
 	RouteNameBlobUploadChunk = "blob-upload-chunk"
+	RouteNameCatalog         = "catalog"
 )
 
 var allEndpoints = []string{
 	RouteNameManifest,
+	RouteNameCatalog,
 	RouteNameTags,
 	RouteNameBlob,
 	RouteNameBlobUpload,

+ 12 - 0
vendor/src/github.com/docker/distribution/registry/api/v2/urls.go

@@ -100,6 +100,18 @@ func (ub *URLBuilder) BuildBaseURL() (string, error) {
 	return baseURL.String(), nil
 }
 
+// BuildCatalogURL constructs a url to get a catalog of repositories.
+func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameCatalog)
+
+	catalogURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(catalogURL, values...).String(), nil
+}
+
 // BuildTagsURL constructs a url to list the tags in the named repository.
 func (ub *URLBuilder) BuildTagsURL(name string) (string, error) {
 	route := ub.cloneRoute(RouteNameTags)

+ 1 - 4
vendor/src/github.com/docker/distribution/registry/client/errors.go

@@ -52,10 +52,7 @@ func handleErrorResponse(resp *http.Response) error {
 	if resp.StatusCode == 401 {
 		err := parseHTTPErrorResponse(resp.Body)
 		if uErr, ok := err.(*UnexpectedHTTPResponseError); ok {
-			return &errcode.Error{
-				Code:   v2.ErrorCodeUnauthorized,
-				Detail: uErr.Response,
-			}
+			return v2.ErrorCodeUnauthorized.WithDetail(uErr.Response)
 		}
 		return err
 	}

+ 134 - 9
vendor/src/github.com/docker/distribution/registry/client/repository.go

@@ -21,6 +21,83 @@ import (
 	"github.com/docker/distribution/registry/storage/cache/memory"
 )
 
+// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
+type Registry interface {
+	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+}
+
+// NewRegistry creates a registry namespace which can be used to get a listing of repositories
+func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
+	ub, err := v2.NewURLBuilderFromString(baseURL)
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport: transport,
+		Timeout:   1 * time.Minute,
+	}
+
+	return &registry{
+		client:  client,
+		ub:      ub,
+		context: ctx,
+	}, nil
+}
+
+type registry struct {
+	client  *http.Client
+	ub      *v2.URLBuilder
+	context context.Context
+}
+
+// Repositories returns a lexicographically sorted catalog given a base URL.  The 'entries' slice will be filled up to the size
+// of the slice, starting at the value provided in 'last'.  The number of entries will be returned along with io.EOF if there
+// are no more entries.
+func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
+	var numFilled int
+	var returnErr error
+
+	values := buildCatalogValues(len(entries), last)
+	u, err := r.ub.BuildCatalogURL(values)
+	if err != nil {
+		return 0, err
+	}
+
+	resp, err := r.client.Get(u)
+	if err != nil {
+		return 0, err
+	}
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusOK:
+		var ctlg struct {
+			Repositories []string `json:"repositories"`
+		}
+		decoder := json.NewDecoder(resp.Body)
+
+		if err := decoder.Decode(&ctlg); err != nil {
+			return 0, err
+		}
+
+		for cnt := range ctlg.Repositories {
+			entries[cnt] = ctlg.Repositories[cnt]
+		}
+		numFilled = len(ctlg.Repositories)
+
+		link := resp.Header.Get("Link")
+		if link == "" {
+			returnErr = io.EOF
+		}
+
+	default:
+		return 0, handleErrorResponse(resp)
+	}
+
+	return numFilled, returnErr
+}
+
 // NewRepository creates a new Repository for the given repository name and base URL
 func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
 	if err := v2.ValidateRepositoryName(name); err != nil {
@@ -70,17 +147,20 @@ func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
 	}
 }
 
-func (r *repository) Manifests() distribution.ManifestService {
+func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
+	// todo(richardscothern): options should be sent over the wire
 	return &manifests{
 		name:   r.Name(),
 		ub:     r.ub,
 		client: r.client,
-	}
+		etags:  make(map[string]string),
+	}, nil
 }
 
 func (r *repository) Signatures() distribution.SignatureService {
+	ms, _ := r.Manifests(r.context)
 	return &signatures{
-		manifests: r.Manifests(),
+		manifests: ms,
 	}
 }
 
@@ -104,6 +184,7 @@ type manifests struct {
 	name   string
 	ub     *v2.URLBuilder
 	client *http.Client
+	etags  map[string]string
 }
 
 func (ms *manifests) Tags() ([]string, error) {
@@ -173,13 +254,40 @@ func (ms *manifests) Get(dgst digest.Digest) (*manifest.SignedManifest, error) {
 	return ms.GetByTag(dgst.String())
 }
 
-func (ms *manifests) GetByTag(tag string) (*manifest.SignedManifest, error) {
+// AddEtagToTag allows a client to supply an eTag to GetByTag which will
+// be used for a conditional HTTP request.  If the eTag matches, a nil
+// manifest and nil error will be returned.
+func AddEtagToTag(tagName, dgst string) distribution.ManifestServiceOption {
+	return func(ms distribution.ManifestService) error {
+		if ms, ok := ms.(*manifests); ok {
+			ms.etags[tagName] = dgst
+			return nil
+		}
+		return fmt.Errorf("etag option is a client-only option")
+	}
+}
+
+func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) {
+	for _, option := range options {
+		err := option(ms)
+		if err != nil {
+			return nil, err
+		}
+	}
+
 	u, err := ms.ub.BuildManifestURL(ms.name, tag)
 	if err != nil {
 		return nil, err
 	}
+	req, err := http.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, err
+	}
 
-	resp, err := ms.client.Get(u)
+	if _, ok := ms.etags[tag]; ok {
+		req.Header.Set("eTag", ms.etags[tag]) // NOTE(review): conditional GETs conventionally use the "If-None-Match" request header ("ETag" is a response header) — confirm the registry server honors "eTag"
+	}
+	resp, err := ms.client.Do(req)
 	if err != nil {
 		return nil, err
 	}
@@ -193,8 +301,9 @@ func (ms *manifests) GetByTag(tag string) (*manifest.SignedManifest, error) {
 		if err := decoder.Decode(&sm); err != nil {
 			return nil, err
 		}
-
 		return &sm, nil
+	case http.StatusNotModified:
+		return nil, nil
 	default:
 		return nil, handleErrorResponse(resp)
 	}
@@ -206,6 +315,8 @@ func (ms *manifests) Put(m *manifest.SignedManifest) error {
 		return err
 	}
 
+	// todo(richardscothern): do something with options here when they become applicable
+
 	putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(m.Raw))
 	if err != nil {
 		return err
@@ -309,7 +420,7 @@ func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.Rea
 		return nil, err
 	}
 
-	return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Length), nil
+	return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Size), nil
 }
 
 func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
@@ -332,7 +443,7 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut
 
 	desc := distribution.Descriptor{
 		MediaType: mediaType,
-		Length:    int64(len(p)),
+		Size:      int64(len(p)),
 		Digest:    dgstr.Digest(),
 	}
 
@@ -401,7 +512,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi
 
 		return distribution.Descriptor{
 			MediaType: resp.Header.Get("Content-Type"),
-			Length:    length,
+			Size:      length,
 			Digest:    dgst,
 		}, nil
 	case http.StatusNotFound:
@@ -410,3 +521,17 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi
 		return distribution.Descriptor{}, handleErrorResponse(resp)
 	}
 }
+
+func buildCatalogValues(maxEntries int, last string) url.Values {
+	values := url.Values{}
+
+	if maxEntries > 0 {
+		values.Add("n", strconv.Itoa(maxEntries))
+	}
+
+	if last != "" {
+		values.Add("last", last)
+	}
+
+	return values
+}

+ 2 - 2
vendor/src/github.com/docker/distribution/registry/storage/cache/cache.go

@@ -23,8 +23,8 @@ func ValidateDescriptor(desc distribution.Descriptor) error {
 		return err
 	}
 
-	if desc.Length < 0 {
-		return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Length)
+	if desc.Size < 0 {
+		return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
 	}
 
 	if desc.MediaType == "" {

+ 3 - 3
vendor/src/github.com/docker/distribution/registry/storage/cache/suite.go

@@ -35,14 +35,14 @@ func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context,
 
 	if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{
 		Digest:    "sha384:abc",
-		Length:    10,
+		Size:      10,
 		MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat {
 		t.Fatalf("expected error with invalid digest: %v", err)
 	}
 
 	if err := cache.SetDescriptor(ctx, "sha384:abc", distribution.Descriptor{
 		Digest:    "",
-		Length:    10,
+		Size:      10,
 		MediaType: "application/octet-stream"}); err == nil {
 		t.Fatalf("expected error setting value on invalid descriptor")
 	}
@@ -60,7 +60,7 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi
 	localDigest := digest.Digest("sha384:abc")
 	expected := distribution.Descriptor{
 		Digest:    "sha256:abc",
-		Length:    10,
+		Size:      10,
 		MediaType: "application/octet-stream"}
 
 	cache, err := provider.RepositoryScoped("foo/bar")