Merge pull request #13576 from stevvooe/verify-digests

Properly verify manifests and layer digests on pull

Commit 274baf70bf: 21 changed files with 1,001 additions and 197 deletions
@@ -8,92 +8,164 @@ import (
 	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/trust"
-	"github.com/docker/docker/utils"
 	"github.com/docker/libtrust"
 )
 
-// loadManifest loads a manifest from a byte array and verifies its content.
-// The signature must be verified or an error is returned. If the manifest
-// contains no signatures by a trusted key for the name in the manifest, the
-// image is not considered verified. The parsed manifest object and a boolean
-// for whether the manifest is verified is returned.
-func (s *TagStore) loadManifest(manifestBytes []byte, dgst, ref string) (*registry.ManifestData, bool, error) {
-	sig, err := libtrust.ParsePrettySignature(manifestBytes, "signatures")
+// loadManifest loads a manifest from a byte array and verifies its content,
+// returning the local digest, the manifest itself, whether or not it was
+// verified. If ref is a digest, rather than a tag, this will be treated as
+// the local digest. An error will be returned if the signature verification
+// fails, local digest verification fails and, if provided, the remote digest
+// verification fails. The boolean return will only be false without error on
+// the failure of signatures trust check.
+func (s *TagStore) loadManifest(manifestBytes []byte, ref string, remoteDigest digest.Digest) (digest.Digest, *registry.ManifestData, bool, error) {
+	payload, keys, err := unpackSignedManifest(manifestBytes)
 	if err != nil {
-		return nil, false, fmt.Errorf("error parsing payload: %s", err)
+		return "", nil, false, fmt.Errorf("error unpacking manifest: %v", err)
 	}
 
-	keys, err := sig.Verify()
-	if err != nil {
-		return nil, false, fmt.Errorf("error verifying payload: %s", err)
-	}
-
-	payload, err := sig.Payload()
-	if err != nil {
-		return nil, false, fmt.Errorf("error retrieving payload: %s", err)
-	}
-
-	var manifestDigest digest.Digest
-
-	if dgst != "" {
-		manifestDigest, err = digest.ParseDigest(dgst)
-		if err != nil {
-			return nil, false, fmt.Errorf("invalid manifest digest from registry: %s", err)
-		}
-
-		dgstVerifier, err := digest.NewDigestVerifier(manifestDigest)
-		if err != nil {
-			return nil, false, fmt.Errorf("unable to verify manifest digest from registry: %s", err)
-		}
-
-		dgstVerifier.Write(payload)
-
-		if !dgstVerifier.Verified() {
-			computedDigest, _ := digest.FromBytes(payload)
-			return nil, false, fmt.Errorf("unable to verify manifest digest: registry has %q, computed %q", manifestDigest, computedDigest)
-		}
-	}
-
-	if utils.DigestReference(ref) && ref != manifestDigest.String() {
-		return nil, false, fmt.Errorf("mismatching image manifest digest: got %q, expected %q", manifestDigest, ref)
+	// TODO(stevvooe): It would be a lot better here to build up a stack of
+	// verifiers, then push the bytes one time for signatures and digests, but
+	// the manifests are typically small, so this optimization is not worth
+	// hacking this code without further refactoring.
+
+	var localDigest digest.Digest
+
+	// Verify the local digest, if present in ref. ParseDigest will validate
+	// that the ref is a digest and verify against that if present. Otherwise
+	// (on error), we simply compute the localDigest and proceed.
+	if dgst, err := digest.ParseDigest(ref); err == nil {
+		// verify the manifest against local ref
+		if err := verifyDigest(dgst, payload); err != nil {
+			return "", nil, false, fmt.Errorf("verifying local digest: %v", err)
+		}
+
+		localDigest = dgst
+	} else {
+		// We don't have a local digest, since we are working from a tag.
+		// Compute the digest of the payload and return that.
+		logrus.Debugf("provided manifest reference %q is not a digest: %v", ref, err)
+		localDigest, err = digest.FromBytes(payload)
+		if err != nil {
+			// near impossible
+			logrus.Errorf("error calculating local digest during tag pull: %v", err)
+			return "", nil, false, err
+		}
+	}
+
+	// verify against the remote digest, if available
+	if remoteDigest != "" {
+		if err := verifyDigest(remoteDigest, payload); err != nil {
+			return "", nil, false, fmt.Errorf("verifying remote digest: %v", err)
+		}
 	}
 
 	var manifest registry.ManifestData
 	if err := json.Unmarshal(payload, &manifest); err != nil {
-		return nil, false, fmt.Errorf("error unmarshalling manifest: %s", err)
+		return "", nil, false, fmt.Errorf("error unmarshalling manifest: %s", err)
 	}
-	if manifest.SchemaVersion != 1 {
-		return nil, false, fmt.Errorf("unsupported schema version: %d", manifest.SchemaVersion)
+
+	// validate the contents of the manifest
+	if err := validateManifest(&manifest); err != nil {
+		return "", nil, false, err
 	}
 
 	var verified bool
-	for _, key := range keys {
-		namespace := manifest.Name
-		if namespace[0] != '/' {
-			namespace = "/" + namespace
-		}
+	verified, err = s.verifyTrustedKeys(manifest.Name, keys)
+	if err != nil {
+		return "", nil, false, fmt.Errorf("error verifying trusted keys: %v", err)
+	}
+
+	return localDigest, &manifest, verified, nil
+}
+
+// unpackSignedManifest takes the raw, signed manifest bytes, unpacks the jws
+// and returns the payload and public keys used to sign the manifest.
+// Signatures are verified for authenticity but not against the trust store.
+func unpackSignedManifest(p []byte) ([]byte, []libtrust.PublicKey, error) {
+	sig, err := libtrust.ParsePrettySignature(p, "signatures")
+	if err != nil {
+		return nil, nil, fmt.Errorf("error parsing payload: %s", err)
+	}
+
+	keys, err := sig.Verify()
+	if err != nil {
+		return nil, nil, fmt.Errorf("error verifying payload: %s", err)
+	}
+
+	payload, err := sig.Payload()
+	if err != nil {
+		return nil, nil, fmt.Errorf("error retrieving payload: %s", err)
+	}
+
+	return payload, keys, nil
+}
+
+// verifyTrustedKeys checks the keys provided against the trust store,
+// ensuring that the provided keys are trusted for the namespace. The keys
+// provided from this method must come from the signatures provided as part of
+// the manifest JWS package, obtained from unpackSignedManifest or libtrust.
+func (s *TagStore) verifyTrustedKeys(namespace string, keys []libtrust.PublicKey) (verified bool, err error) {
+	if namespace[0] != '/' {
+		namespace = "/" + namespace
+	}
+
+	for _, key := range keys {
 		b, err := key.MarshalJSON()
 		if err != nil {
-			return nil, false, fmt.Errorf("error marshalling public key: %s", err)
+			return false, fmt.Errorf("error marshalling public key: %s", err)
 		}
 		// Check key has read/write permission (0x03)
 		v, err := s.trustService.CheckKey(namespace, b, 0x03)
 		if err != nil {
 			vErr, ok := err.(trust.NotVerifiedError)
 			if !ok {
-				return nil, false, fmt.Errorf("error running key check: %s", err)
+				return false, fmt.Errorf("error running key check: %s", err)
 			}
 			logrus.Debugf("Key check result: %v", vErr)
 		}
 		verified = v
-		if verified {
-			logrus.Debug("Key check result: verified")
-		}
 	}
-	return &manifest, verified, nil
+
+	if verified {
+		logrus.Debug("Key check result: verified")
+	}
+
+	return
 }
 
-func checkValidManifest(manifest *registry.ManifestData) error {
+// verifyDigest checks the contents of p against the provided digest. Note
+// that for manifests, this is the signed payload and not the raw bytes with
+// signatures.
+func verifyDigest(dgst digest.Digest, p []byte) error {
+	if err := dgst.Validate(); err != nil {
+		return fmt.Errorf("error validating digest %q: %v", dgst, err)
+	}
+
+	verifier, err := digest.NewDigestVerifier(dgst)
+	if err != nil {
+		// There are not many ways this can go wrong: if it does, it's
+		// fatal. Likely, the cause would be poor validation of the
+		// incoming reference.
+		return fmt.Errorf("error creating verifier for digest %q: %v", dgst, err)
+	}
+
+	if _, err := verifier.Write(p); err != nil {
+		return fmt.Errorf("error writing payload to digest verifier (verifier target %q): %v", dgst, err)
+	}
+
+	if !verifier.Verified() {
+		return fmt.Errorf("verification against digest %q failed", dgst)
+	}
+
+	return nil
+}
+
+func validateManifest(manifest *registry.ManifestData) error {
+	if manifest.SchemaVersion != 1 {
+		return fmt.Errorf("unsupported schema version: %d", manifest.SchemaVersion)
+	}
+
 	if len(manifest.FSLayers) != len(manifest.History) {
 		return fmt.Errorf("length of history not equal to number of layers")
 	}
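The verification pattern the new verifyDigest helper relies on is easiest to see in isolation. Below is a minimal, hypothetical sketch (not part of this change) against the vendored distribution/digest package at this commit, where FromBytes still returns an error; the payload value is made up:

package main

import (
	_ "crypto/sha256" // register SHA-256 with the crypto package
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	payload := []byte("example manifest payload") // hypothetical stand-in for a signed payload

	// Compute the content digest, as loadManifest does for a tag pull.
	dgst, err := digest.FromBytes(payload)
	if err != nil {
		panic(err)
	}

	// Replay the bytes through a verifier, mirroring verifyDigest above.
	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		panic(err)
	}
	if _, err := verifier.Write(payload); err != nil {
		panic(err)
	}
	fmt.Println("verified:", verifier.Verified()) // true
}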
@@ -8,11 +8,13 @@ import (
 	"os"
 	"testing"
 
+	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/tarsum"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
+	"github.com/docker/libtrust"
 )
 
 const (
@@ -181,3 +183,121 @@ func TestManifestTarsumCache(t *testing.T) {
 		t.Fatalf("Unexpected json value\nExpected:\n%s\nActual:\n%s", v1compat, manifest.History[0].V1Compatibility)
 	}
 }
+
+// TestManifestDigestCheck ensures that loadManifest properly verifies the
+// remote and local digest.
+func TestManifestDigestCheck(t *testing.T) {
+	tmp, err := utils.TestDirectory("")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmp)
+	store := mkTestTagStore(tmp, t)
+	defer store.graph.driver.Cleanup()
+
+	archive, err := fakeTar()
+	if err != nil {
+		t.Fatal(err)
+	}
+	img := &image.Image{ID: testManifestImageID}
+	if err := store.graph.Register(img, archive); err != nil {
+		t.Fatal(err)
+	}
+	if err := store.Tag(testManifestImageName, testManifestTag, testManifestImageID, false); err != nil {
+		t.Fatal(err)
+	}
+
+	if cs, err := img.GetCheckSum(store.graph.ImageRoot(testManifestImageID)); err != nil {
+		t.Fatal(err)
+	} else if cs != "" {
+		t.Fatalf("Non-empty checksum file after register")
+	}
+
+	// Generate manifest
+	payload, err := store.newManifest(testManifestImageName, testManifestImageName, testManifestTag)
+	if err != nil {
+		t.Fatalf("unexpected error generating test manifest: %v", err)
+	}
+
+	pk, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatalf("unexpected error generating private key: %v", err)
+	}
+
+	sig, err := libtrust.NewJSONSignature(payload)
+	if err != nil {
+		t.Fatalf("error creating signature: %v", err)
+	}
+
+	if err := sig.Sign(pk); err != nil {
+		t.Fatalf("error signing manifest bytes: %v", err)
+	}
+
+	signedBytes, err := sig.PrettySignature("signatures")
+	if err != nil {
+		t.Fatalf("error getting signed bytes: %v", err)
+	}
+
+	dgst, err := digest.FromBytes(payload)
+	if err != nil {
+		t.Fatalf("error getting digest of manifest: %v", err)
+	}
+
+	// use this as the "bad" digest
+	zeroDigest, err := digest.FromBytes([]byte{})
+	if err != nil {
+		t.Fatalf("error making zero digest: %v", err)
+	}
+
+	// Remote and local match, everything should look good
+	local, _, _, err := store.loadManifest(signedBytes, dgst.String(), dgst)
+	if err != nil {
+		t.Fatalf("unexpected error verifying local and remote digest: %v", err)
+	}
+
+	if local != dgst {
+		t.Fatalf("local digest not correctly calculated: %v", err)
+	}
+
+	// remote and no local, since pulling by tag
+	local, _, _, err = store.loadManifest(signedBytes, "tag", dgst)
+	if err != nil {
+		t.Fatalf("unexpected error verifying tag pull and remote digest: %v", err)
+	}
+
+	if local != dgst {
+		t.Fatalf("local digest not correctly calculated: %v", err)
+	}
+
+	// remote and differing local, this is the most important to fail
+	local, _, _, err = store.loadManifest(signedBytes, zeroDigest.String(), dgst)
+	if err == nil {
+		t.Fatalf("error expected when verifying with differing local digest")
+	}
+
+	// no remote, no local (by tag)
+	local, _, _, err = store.loadManifest(signedBytes, "tag", "")
+	if err != nil {
+		t.Fatalf("unexpected error verifying manifest without remote digest: %v", err)
+	}
+
+	if local != dgst {
+		t.Fatalf("local digest not correctly calculated: %v", err)
+	}
+
+	// no remote, with local
+	local, _, _, err = store.loadManifest(signedBytes, dgst.String(), "")
+	if err != nil {
+		t.Fatalf("unexpected error verifying manifest without remote digest: %v", err)
+	}
+
+	if local != dgst {
+		t.Fatalf("local digest not correctly calculated: %v", err)
+	}
+
+	// bad remote, we fail the check.
+	local, _, _, err = store.loadManifest(signedBytes, dgst.String(), zeroDigest)
+	if err == nil {
+		t.Fatalf("error expected when verifying with differing remote digest")
+	}
+}
@@ -457,17 +457,6 @@ func WriteStatus(requestedTag string, out io.Writer, sf *streamformatter.StreamF
 	}
 }
 
-// downloadInfo is used to pass information from download to extractor
-type downloadInfo struct {
-	imgJSON    []byte
-	img        *image.Image
-	digest     digest.Digest
-	tmpFile    *os.File
-	length     int64
-	downloaded bool
-	err        chan error
-}
-
 func (s *TagStore) pullV2Repository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter) error {
 	endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
 	if err != nil {
@@ -517,27 +506,34 @@ func (s *TagStore) pullV2Repository(r *registry.Session, out io.Writer, repoInfo
 func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, auth *registry.RequestAuthorization) (bool, error) {
 	logrus.Debugf("Pulling tag from V2 registry: %q", tag)
 
-	manifestBytes, manifestDigest, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
+	remoteDigest, manifestBytes, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
 	if err != nil {
 		return false, err
 	}
 
 	// loadManifest ensures that the manifest payload has the expected digest
 	// if the tag is a digest reference.
-	manifest, verified, err := s.loadManifest(manifestBytes, manifestDigest, tag)
+	localDigest, manifest, verified, err := s.loadManifest(manifestBytes, tag, remoteDigest)
 	if err != nil {
 		return false, fmt.Errorf("error verifying manifest: %s", err)
 	}
 
-	if err := checkValidManifest(manifest); err != nil {
-		return false, err
-	}
-
 	if verified {
 		logrus.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
 	}
 	out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))
 
+	// downloadInfo is used to pass information from download to extractor
+	type downloadInfo struct {
+		imgJSON    []byte
+		img        *image.Image
+		digest     digest.Digest
+		tmpFile    *os.File
+		length     int64
+		downloaded bool
+		err        chan error
+	}
+
 	downloads := make([]downloadInfo, len(manifest.FSLayers))
 
 	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
@@ -610,8 +606,7 @@ func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *regis
 			out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Verifying Checksum", nil))
 
 			if !verifier.Verified() {
-				logrus.Infof("Image verification failed: checksum mismatch for %q", di.digest.String())
-				verified = false
+				return fmt.Errorf("image layer digest verification failed for %q", di.digest)
 			}
 
 			out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
@@ -688,15 +683,33 @@ func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *regis
 		out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
 	}
 
-	if manifestDigest != "" {
-		out.Write(sf.FormatStatus("", "Digest: %s", manifestDigest))
+	if localDigest != remoteDigest { // this is not a verification check.
+		// NOTE(stevvooe): This is a very defensive branch and should never
+		// happen, since all manifest digest implementations use the same
+		// algorithm.
+		logrus.WithFields(
+			logrus.Fields{
+				"local":  localDigest,
+				"remote": remoteDigest,
+			}).Debugf("local digest does not match remote")
+
+		out.Write(sf.FormatStatus("", "Remote Digest: %s", remoteDigest))
 	}
 
-	if utils.DigestReference(tag) {
-		if err = s.SetDigest(repoInfo.LocalName, tag, downloads[0].img.ID); err != nil {
+	out.Write(sf.FormatStatus("", "Digest: %s", localDigest))
+
+	if tag == localDigest.String() {
+		// TODO(stevvooe): Ideally, we should always set the digest so we can
+		// use the digest whether we pull by it or not. Unfortunately, the tag
+		// store treats the digest as a separate tag, meaning there may be an
+		// untagged digest image that would seem to be dangling by a user.
+
+		if err = s.SetDigest(repoInfo.LocalName, localDigest.String(), downloads[0].img.ID); err != nil {
 			return false, err
 		}
-	} else {
+	}
+
+	if !utils.DigestReference(tag) {
 		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
 		if err = s.Tag(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
 			return false, err
@@ -413,7 +413,7 @@ func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, o
 		m.History[i] = &registry.ManifestHistory{V1Compatibility: string(jsonData)}
 	}
 
-	if err := checkValidManifest(m); err != nil {
+	if err := validateManifest(m); err != nil {
 		return fmt.Errorf("invalid manifest: %s", err)
 	}
 
@@ -12,6 +12,7 @@ import (
 	"github.com/docker/docker/daemon/graphdriver"
 	_ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests
 	"github.com/docker/docker/image"
+	"github.com/docker/docker/trust"
 	"github.com/docker/docker/utils"
 )
 
@@ -60,9 +61,16 @@ func mkTestTagStore(root string, t *testing.T) *TagStore {
 	if err != nil {
 		t.Fatal(err)
 	}
 
+	trust, err := trust.NewTrustStore(root + "/trust")
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	tagCfg := &TagStoreConfig{
 		Graph:  graph,
 		Events: events.New(),
+		Trust:  trust,
 	}
 	store, err := NewTagStore(path.Join(root, "tags"), tagCfg)
 	if err != nil {
@@ -60,7 +60,7 @@ clone git github.com/vishvananda/netns 008d17ae001344769b031375bdb38a86219154c6
 clone git github.com/vishvananda/netlink 8eb64238879fed52fd51c5b30ad20b928fb4c36c
 
 # get distribution packages
-clone git github.com/docker/distribution d957768537c5af40e4f4cd96871f7b2bde9e2923
+clone git github.com/docker/distribution b9eeb328080d367dbde850ec6e94f1e4ac2b5efe
 mv src/github.com/docker/distribution/digest tmp-digest
 mv src/github.com/docker/distribution/registry/api tmp-api
 rm -rf src/github.com/docker/distribution
@@ -68,10 +68,15 @@ func (r *Session) GetV2Authorization(ep *Endpoint, imageName string, readOnly bo
 // 1.c) if anything else, err
 // 2) PUT the created/signed manifest
 //
-func (r *Session) GetV2ImageManifest(ep *Endpoint, imageName, tagName string, auth *RequestAuthorization) ([]byte, string, error) {
+
+// GetV2ImageManifest simply fetches the bytes of a manifest and the remote
+// digest, if available in the request. Note that the application shouldn't
+// rely on the untrusted remoteDigest, and should also verify against a
+// locally provided digest, if applicable.
+func (r *Session) GetV2ImageManifest(ep *Endpoint, imageName, tagName string, auth *RequestAuthorization) (remoteDigest digest.Digest, p []byte, err error) {
 	routeURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName)
 	if err != nil {
-		return nil, "", err
+		return "", nil, err
 	}
 
 	method := "GET"
@@ -79,31 +84,45 @@ func (r *Session) GetV2ImageManifest(ep *Endpoint, imageName, tagName string, au
 
 	req, err := http.NewRequest(method, routeURL, nil)
 	if err != nil {
-		return nil, "", err
+		return "", nil, err
 	}
 
 	if err := auth.Authorize(req); err != nil {
-		return nil, "", err
+		return "", nil, err
 	}
 
 	res, err := r.client.Do(req)
 	if err != nil {
-		return nil, "", err
+		return "", nil, err
 	}
 	defer res.Body.Close()
 
 	if res.StatusCode != 200 {
 		if res.StatusCode == 401 {
-			return nil, "", errLoginRequired
+			return "", nil, errLoginRequired
 		} else if res.StatusCode == 404 {
-			return nil, "", ErrDoesNotExist
+			return "", nil, ErrDoesNotExist
 		}
-		return nil, "", httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s:%s", res.StatusCode, imageName, tagName), res)
+		return "", nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s:%s", res.StatusCode, imageName, tagName), res)
 	}
 
-	manifestBytes, err := ioutil.ReadAll(res.Body)
+	p, err = ioutil.ReadAll(res.Body)
 	if err != nil {
-		return nil, "", fmt.Errorf("Error while reading the http response: %s", err)
+		return "", nil, fmt.Errorf("Error while reading the http response: %s", err)
 	}
 
-	return manifestBytes, res.Header.Get(DockerDigestHeader), nil
+	dgstHdr := res.Header.Get(DockerDigestHeader)
+	if dgstHdr != "" {
+		remoteDigest, err = digest.ParseDigest(dgstHdr)
+		if err != nil {
+			// NOTE(stevvooe): Including the remote digest is optional. We
+			// don't need to verify against it, but it is good practice.
+			remoteDigest = ""
+			logrus.Debugf("error parsing remote digest when fetching %v: %v", routeURL, err)
+		}
+	}
+
+	return
 }
 
 // - Succeeded to head image blob (already exists)
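A note on the defensive handling above: the remote digest arrives in a response header (DockerDigestHeader; that its literal value is "Docker-Content-Digest" is an assumption here) and is advisory, so a malformed value downgrades to an empty digest instead of failing the fetch. A self-contained sketch of that pattern, not taken from this PR:

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/digest"
)

// parseRemoteDigest mirrors the pattern in GetV2ImageManifest: a missing or
// unparseable header yields an empty digest rather than an error.
func parseRemoteDigest(res *http.Response) digest.Digest {
	hdr := res.Header.Get("Docker-Content-Digest")
	if hdr == "" {
		return ""
	}
	d, err := digest.ParseDigest(hdr)
	if err != nil {
		return "" // advisory value only; callers verify content separately
	}
	return d
}

func main() {
	res := &http.Response{Header: http.Header{}}
	res.Header.Set("Docker-Content-Digest", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	fmt.Println(parseRemoteDigest(res))
}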
@@ -2,7 +2,6 @@ package digest
 
 import (
 	"bytes"
-	"crypto/sha256"
 	"fmt"
 	"hash"
 	"io"
@@ -16,6 +15,7 @@ import (
 const (
 	// DigestTarSumV1EmptyTar is the digest for the empty tar file.
 	DigestTarSumV1EmptyTar = "tarsum.v1+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+
 	// DigestSha256EmptyTar is the canonical sha256 digest of empty data
 	DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
 )
@@ -39,7 +39,7 @@ const (
 type Digest string
 
 // NewDigest returns a Digest from alg and a hash.Hash object.
-func NewDigest(alg string, h hash.Hash) Digest {
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
 	return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil)))
 }
 
@@ -72,13 +72,13 @@ func ParseDigest(s string) (Digest, error) {
 
 // FromReader returns the most valid digest for the underlying content.
 func FromReader(rd io.Reader) (Digest, error) {
-	h := sha256.New()
+	digester := Canonical.New()
 
-	if _, err := io.Copy(h, rd); err != nil {
+	if _, err := io.Copy(digester.Hash(), rd); err != nil {
 		return "", err
 	}
 
-	return NewDigest("sha256", h), nil
+	return digester.Digest(), nil
 }
 
 // FromTarArchive produces a tarsum digest from reader rd.
@@ -131,8 +131,8 @@ func (d Digest) Validate() error {
 		return ErrDigestInvalidFormat
 	}
 
-	switch s[:i] {
-	case "sha256", "sha384", "sha512":
+	switch Algorithm(s[:i]) {
+	case SHA256, SHA384, SHA512:
 		break
 	default:
 		return ErrDigestUnsupported
@@ -143,8 +143,8 @@ func (d Digest) Validate() error {
 
 // Algorithm returns the algorithm portion of the digest. This will panic if
 // the underlying digest is not in a valid format.
-func (d Digest) Algorithm() string {
-	return string(d[:d.sepIndex()])
+func (d Digest) Algorithm() Algorithm {
+	return Algorithm(d[:d.sepIndex()])
 }
 
 // Hex returns the hex digest portion of the digest. This will panic if the
@@ -10,7 +10,7 @@ func TestParseDigest(t *testing.T) {
 	for _, testcase := range []struct {
 		input     string
 		err       error
-		algorithm string
+		algorithm Algorithm
 		hex       string
 	}{
 		{
@@ -1,44 +1,95 @@
 package digest
 
 import (
-	"crypto/sha256"
+	"crypto"
 	"hash"
 )
 
-// Digester calculates the digest of written data. It is functionally
-// equivalent to hash.Hash but provides methods for returning the Digest type
-// rather than raw bytes.
-type Digester struct {
-	alg  string
-	hash hash.Hash
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
+type Algorithm string
+
+// supported digest types
+const (
+	SHA256         Algorithm = "sha256"           // sha256 with hex encoding
+	SHA384         Algorithm = "sha384"           // sha384 with hex encoding
+	SHA512         Algorithm = "sha512"           // sha512 with hex encoding
+	TarsumV1SHA256 Algorithm = "tarsum+v1+sha256" // supported tarsum version, verification only
+
+	// Canonical is the primary digest algorithm used with the distribution
+	// project. Other digests may be used but this one is the primary storage
+	// digest.
+	Canonical = SHA256
+)
+
+var (
+	// TODO(stevvooe): Follow the pattern of the standard crypto package for
+	// registration of digests. Effectively, we are a registerable set and
+	// common symbol access.
+
+	// algorithms maps values to hash.Hash implementations. Other algorithms
+	// may be available but they cannot be calculated by the digest package.
+	algorithms = map[Algorithm]crypto.Hash{
+		SHA256: crypto.SHA256,
+		SHA384: crypto.SHA384,
+		SHA512: crypto.SHA512,
+	}
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, New and Hash will return nil.
+func (a Algorithm) Available() bool {
+	h, ok := algorithms[a]
+	if !ok {
+		return false
+	}
+
+	// check availability of the hash, as well
+	return h.Available()
 }
 
-// NewDigester create a new Digester with the given hashing algorithm and instance
-// of that algo's hasher.
-func NewDigester(alg string, h hash.Hash) Digester {
-	return Digester{
-		alg:  alg,
-		hash: h,
+// New returns a new digester for the specified algorithm. If the algorithm
+// does not have a digester implementation, nil will be returned. This can be
+// checked by calling Available before calling New.
+func (a Algorithm) New() Digester {
+	return &digester{
+		alg:  a,
+		hash: a.Hash(),
 	}
 }
 
-// NewCanonicalDigester is a convenience function to create a new Digester with
-// out default settings.
-func NewCanonicalDigester() Digester {
-	return NewDigester("sha256", sha256.New())
+// Hash returns a new hash as used by the algorithm. If not available, nil is
+// returned. Make sure to check Available before calling.
+func (a Algorithm) Hash() hash.Hash {
+	if !a.Available() {
+		return nil
+	}
+
+	return algorithms[a].New()
 }
 
-// Write data to the digester. These writes cannot fail.
-func (d *Digester) Write(p []byte) (n int, err error) {
-	return d.hash.Write(p)
+// TODO(stevvooe): Allow resolution of verifiers using the digest type and
+// this registration system.
+
+// Digester calculates the digest of written data. Writes should go directly
+// to the return value of Hash, while calling Digest will return the current
+// value of the digest.
+type Digester interface {
+	Hash() hash.Hash // provides direct access to underlying hash instance.
+	Digest() Digest
 }
 
-// Digest returns the current digest for this digester.
-func (d *Digester) Digest() Digest {
+// digester provides a simple digester definition that embeds a hasher.
+type digester struct {
+	alg  Algorithm
+	hash hash.Hash
+}
+
+func (d *digester) Hash() hash.Hash {
+	return d.hash
+}
+
+func (d *digester) Digest() Digest {
 	return NewDigest(d.alg, d.hash)
 }
-
-// Reset the state of the digester.
-func (d *Digester) Reset() {
-	d.hash.Reset()
-}
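The new Algorithm type above both names the hash and owns its construction, while a Digester is just a handle over the underlying hash. A small usage sketch (not from this PR), assuming the vendored package as it stands at this commit:

package main

import (
	_ "crypto/sha256" // register SHA-256 with the crypto package
	"fmt"
	"io"
	"strings"

	"github.com/docker/distribution/digest"
)

func main() {
	// Canonical is SHA256; Available reports whether the hash is linked in.
	if !digest.Canonical.Available() {
		panic("canonical digest algorithm unavailable")
	}

	// Writes go straight to the hash; Digest returns the accumulated value.
	digester := digest.Canonical.New()
	if _, err := io.Copy(digester.Hash(), strings.NewReader("hello world")); err != nil {
		panic(err)
	}
	fmt.Println(digester.Digest()) // sha256:b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9
}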
vendor/src/github.com/docker/distribution/digest/set.go (vendored, new file, 195 lines)

@@ -0,0 +1,195 @@
+package digest
+
+import (
+	"errors"
+	"sort"
+	"strings"
+)
+
+var (
+	// ErrDigestNotFound is used when a matching digest
+	// could not be found in a set.
+	ErrDigestNotFound = errors.New("digest not found")
+
+	// ErrDigestAmbiguous is used when multiple digests
+	// are found in a set. None of the matching digests
+	// should be considered valid matches.
+	ErrDigestAmbiguous = errors.New("ambiguous digest string")
+)
+
+// Set is used to hold a unique set of digests which
+// may be easily referenced by a string
+// representation of the digest as well as short representation.
+// The uniqueness of the short representation is based on other
+// digests in the set. If digests are omitted from this set,
+// collisions in a larger set may not be detected, therefore it
+// is important to always do short representation lookups on
+// the complete set of digests. To mitigate collisions, an
+// appropriately long short code should be used.
+type Set struct {
+	entries digestEntries
+}
+
+// NewSet creates an empty set of digests
+// which may have digests added.
+func NewSet() *Set {
+	return &Set{
+		entries: digestEntries{},
+	}
+}
+
+// checkShortMatch checks whether two digests match as either whole
+// values or short values. This function does not test equality,
+// rather whether the second value could match against the first
+// value.
+func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
+	if len(hex) == len(shortHex) {
+		if hex != shortHex {
+			return false
+		}
+		if len(shortAlg) > 0 && string(alg) != shortAlg {
+			return false
+		}
+	} else if !strings.HasPrefix(hex, shortHex) {
+		return false
+	} else if len(shortAlg) > 0 && string(alg) != shortAlg {
+		return false
+	}
+	return true
+}
+
+// Lookup looks for a digest matching the given string representation.
+// If no digests could be found ErrDigestNotFound will be returned
+// with an empty digest value. If multiple matches are found
+// ErrDigestAmbiguous will be returned with an empty digest value.
+func (dst *Set) Lookup(d string) (Digest, error) {
+	if len(dst.entries) == 0 {
+		return "", ErrDigestNotFound
+	}
+	var (
+		searchFunc func(int) bool
+		alg        Algorithm
+		hex        string
+	)
+	dgst, err := ParseDigest(d)
+	if err == ErrDigestInvalidFormat {
+		hex = d
+		searchFunc = func(i int) bool {
+			return dst.entries[i].val >= d
+		}
+	} else {
+		hex = dgst.Hex()
+		alg = dgst.Algorithm()
+		searchFunc = func(i int) bool {
+			if dst.entries[i].val == hex {
+				return dst.entries[i].alg >= alg
+			}
+			return dst.entries[i].val >= hex
+		}
+	}
+	idx := sort.Search(len(dst.entries), searchFunc)
+	if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
+		return "", ErrDigestNotFound
+	}
+	if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
+		return dst.entries[idx].digest, nil
+	}
+	if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
+		return "", ErrDigestAmbiguous
+	}
+
+	return dst.entries[idx].digest, nil
+}
+
+// Add adds the given digests to the set. An error will be returned
+// if the given digest is invalid. If the digest already exists in the
+// table, this operation will be a no-op.
+func (dst *Set) Add(d Digest) error {
+	if err := d.Validate(); err != nil {
+		return err
+	}
+	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+	searchFunc := func(i int) bool {
+		if dst.entries[i].val == entry.val {
+			return dst.entries[i].alg >= entry.alg
+		}
+		return dst.entries[i].val >= entry.val
+	}
+	idx := sort.Search(len(dst.entries), searchFunc)
+	if idx == len(dst.entries) {
+		dst.entries = append(dst.entries, entry)
+		return nil
+	} else if dst.entries[idx].digest == d {
+		return nil
+	}
+
+	entries := append(dst.entries, nil)
+	copy(entries[idx+1:], entries[idx:len(entries)-1])
+	entries[idx] = entry
+	dst.entries = entries
+	return nil
+}
+
+// ShortCodeTable returns a map of Digest to unique short codes. The
+// length represents the minimum value, the maximum length may be the
+// entire value of digest if uniqueness cannot be achieved without the
+// full value. This function will attempt to make short codes as short
+// as possible to be unique.
+func ShortCodeTable(dst *Set, length int) map[Digest]string {
+	m := make(map[Digest]string, len(dst.entries))
+	l := length
+	resetIdx := 0
+	for i := 0; i < len(dst.entries); i++ {
+		var short string
+		extended := true
+		for extended {
+			extended = false
+			if len(dst.entries[i].val) <= l {
+				short = dst.entries[i].digest.String()
+			} else {
+				short = dst.entries[i].val[:l]
+				for j := i + 1; j < len(dst.entries); j++ {
+					if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
+						if j > resetIdx {
+							resetIdx = j
+						}
+						extended = true
+					} else {
+						break
+					}
+				}
+				if extended {
+					l++
+				}
+			}
+		}
+		m[dst.entries[i].digest] = short
+		if i >= resetIdx {
+			l = length
+		}
+	}
+	return m
+}
+
+type digestEntry struct {
+	alg    Algorithm
+	val    string
+	digest Digest
+}
+
+type digestEntries []*digestEntry
+
+func (d digestEntries) Len() int {
+	return len(d)
+}
+
+func (d digestEntries) Less(i, j int) bool {
+	if d[i].val != d[j].val {
+		return d[i].val < d[j].val
+	}
+	return d[i].alg < d[j].alg
+}
+
+func (d digestEntries) Swap(i, j int) {
+	d[i], d[j] = d[j], d[i]
+}
vendor/src/github.com/docker/distribution/digest/set_test.go (vendored, new file, 272 lines)

@@ -0,0 +1,272 @@
+package digest
+
+import (
+	"crypto/sha256"
+	"encoding/binary"
+	"math/rand"
+	"testing"
+)
+
+func assertEqualDigests(t *testing.T, d1, d2 Digest) {
+	if d1 != d2 {
+		t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2)
+	}
+}
+
+func TestLookup(t *testing.T) {
+	digests := []Digest{
+		"sha256:12345",
+		"sha256:1234",
+		"sha256:12346",
+		"sha256:54321",
+		"sha256:65431",
+		"sha256:64321",
+		"sha256:65421",
+		"sha256:65321",
+	}
+
+	dset := NewSet()
+	for i := range digests {
+		if err := dset.Add(digests[i]); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	dgst, err := dset.Lookup("54")
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqualDigests(t, dgst, digests[3])
+
+	dgst, err = dset.Lookup("1234")
+	if err == nil {
+		t.Fatal("Expected ambiguous error looking up: 1234")
+	}
+	if err != ErrDigestAmbiguous {
+		t.Fatal(err)
+	}
+
+	dgst, err = dset.Lookup("9876")
+	if err == nil {
+		t.Fatal("Expected ambiguous error looking up: 9876")
+	}
+	if err != ErrDigestNotFound {
+		t.Fatal(err)
+	}
+
+	dgst, err = dset.Lookup("sha256:1234")
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqualDigests(t, dgst, digests[1])
+
+	dgst, err = dset.Lookup("sha256:12345")
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqualDigests(t, dgst, digests[0])
+
+	dgst, err = dset.Lookup("sha256:12346")
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqualDigests(t, dgst, digests[2])
+
+	dgst, err = dset.Lookup("12346")
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqualDigests(t, dgst, digests[2])
+
+	dgst, err = dset.Lookup("12345")
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqualDigests(t, dgst, digests[0])
+}
+
+func TestAddDuplication(t *testing.T) {
+	digests := []Digest{
+		"sha256:1234",
+		"sha256:12345",
+		"sha256:12346",
+		"sha256:54321",
+		"sha256:65431",
+		"sha512:65431",
+		"sha512:65421",
+		"sha512:65321",
+	}
+
+	dset := NewSet()
+	for i := range digests {
+		if err := dset.Add(digests[i]); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	if len(dset.entries) != 8 {
+		t.Fatal("Invalid dset size")
+	}
+
+	if err := dset.Add(Digest("sha256:12345")); err != nil {
+		t.Fatal(err)
+	}
+
+	if len(dset.entries) != 8 {
+		t.Fatal("Duplicate digest insert allowed")
+	}
+
+	if err := dset.Add(Digest("sha384:12345")); err != nil {
+		t.Fatal(err)
+	}
+
+	if len(dset.entries) != 9 {
+		t.Fatal("Insert with different algorithm not allowed")
+	}
+}
+
+func assertEqualShort(t *testing.T, actual, expected string) {
+	if actual != expected {
+		t.Fatalf("Unexpected short value:\n\tExpected: %s\n\tActual: %s", expected, actual)
+	}
+}
+
+func TestShortCodeTable(t *testing.T) {
+	digests := []Digest{
+		"sha256:1234",
+		"sha256:12345",
+		"sha256:12346",
+		"sha256:54321",
+		"sha256:65431",
+		"sha256:64321",
+		"sha256:65421",
+		"sha256:65321",
+	}
+
+	dset := NewSet()
+	for i := range digests {
+		if err := dset.Add(digests[i]); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	dump := ShortCodeTable(dset, 2)
+
+	if len(dump) < len(digests) {
+		t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests))
+	}
+
+	assertEqualShort(t, dump[digests[0]], "sha256:1234")
+	assertEqualShort(t, dump[digests[1]], "sha256:12345")
+	assertEqualShort(t, dump[digests[2]], "sha256:12346")
+	assertEqualShort(t, dump[digests[3]], "54")
+	assertEqualShort(t, dump[digests[4]], "6543")
+	assertEqualShort(t, dump[digests[5]], "64")
+	assertEqualShort(t, dump[digests[6]], "6542")
+	assertEqualShort(t, dump[digests[7]], "653")
+}
+
+func createDigests(count int) ([]Digest, error) {
+	r := rand.New(rand.NewSource(25823))
+	digests := make([]Digest, count)
+	for i := range digests {
+		h := sha256.New()
+		if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil {
+			return nil, err
+		}
+		digests[i] = NewDigest("sha256", h)
+	}
+	return digests, nil
+}
+
+func benchAddNTable(b *testing.B, n int) {
+	digests, err := createDigests(n)
+	if err != nil {
+		b.Fatal(err)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
+		for j := range digests {
+			if err = dset.Add(digests[j]); err != nil {
+				b.Fatal(err)
+			}
+		}
+	}
+}
+
+func benchLookupNTable(b *testing.B, n int, shortLen int) {
+	digests, err := createDigests(n)
+	if err != nil {
+		b.Fatal(err)
+	}
+	dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
+	for i := range digests {
+		if err := dset.Add(digests[i]); err != nil {
+			b.Fatal(err)
+		}
+	}
+	shorts := make([]string, 0, n)
+	for _, short := range ShortCodeTable(dset, shortLen) {
+		shorts = append(shorts, short)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		if _, err = dset.Lookup(shorts[i%n]); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func benchShortCodeNTable(b *testing.B, n int, shortLen int) {
+	digests, err := createDigests(n)
+	if err != nil {
+		b.Fatal(err)
+	}
+	dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
+	for i := range digests {
+		if err := dset.Add(digests[i]); err != nil {
+			b.Fatal(err)
+		}
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		ShortCodeTable(dset, shortLen)
+	}
+}
+
+func BenchmarkAdd10(b *testing.B) {
+	benchAddNTable(b, 10)
+}
+
+func BenchmarkAdd100(b *testing.B) {
+	benchAddNTable(b, 100)
+}
+
+func BenchmarkAdd1000(b *testing.B) {
+	benchAddNTable(b, 1000)
+}
+
+func BenchmarkLookup10(b *testing.B) {
+	benchLookupNTable(b, 10, 12)
+}
+
+func BenchmarkLookup100(b *testing.B) {
+	benchLookupNTable(b, 100, 12)
+}
+
+func BenchmarkLookup1000(b *testing.B) {
+	benchLookupNTable(b, 1000, 12)
+}
+
+func BenchmarkShortCode10(b *testing.B) {
+	benchShortCodeNTable(b, 10, 12)
+}
+
+func BenchmarkShortCode100(b *testing.B) {
+	benchShortCodeNTable(b, 100, 12)
+}
+
+func BenchmarkShortCode1000(b *testing.B) {
+	benchShortCodeNTable(b, 1000, 12)
+}
@@ -6,10 +6,10 @@ import (
 	"regexp"
 )
 
-// TarSumRegexp defines a reguler expression to match tarsum identifiers.
+// TarSumRegexp defines a regular expression to match tarsum identifiers.
 var TarsumRegexp = regexp.MustCompile("tarsum(?:.[a-z0-9]+)?\\+[a-zA-Z0-9]+:[A-Fa-f0-9]+")
 
-// TarsumRegexpCapturing defines a reguler expression to match tarsum identifiers with
+// TarsumRegexpCapturing defines a regular expression to match tarsum identifiers with
 // capture groups corresponding to each component.
 var TarsumRegexpCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)")
 
@@ -1,8 +1,6 @@
 package digest
 
 import (
-	"crypto/sha256"
-	"crypto/sha512"
 	"hash"
 	"io"
 	"io/ioutil"
@@ -33,7 +31,7 @@ func NewDigestVerifier(d Digest) (Verifier, error) {
 	switch alg {
 	case "sha256", "sha384", "sha512":
 		return hashVerifier{
-			hash:   newHash(alg),
+			hash:   alg.Hash(),
 			digest: d,
 		}, nil
 	default:
@@ -95,19 +93,6 @@ func (lv *lengthVerifier) Verified() bool {
 	return lv.expected == lv.len
 }
 
-func newHash(name string) hash.Hash {
-	switch name {
-	case "sha256":
-		return sha256.New()
-	case "sha384":
-		return sha512.New384()
-	case "sha512":
-		return sha512.New()
-	default:
-		panic("unsupport algorithm: " + name)
-	}
-}
-
 type hashVerifier struct {
 	digest Digest
 	hash   hash.Hash
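With newHash gone, the verifier reaches its hash constructor through the digest itself. A small sketch of that replacement, assuming the vendored package; the digest literal is the empty-data sha256 that already appears above as DigestSha256EmptyTar:

package main

import (
	_ "crypto/sha256" // register SHA-256 with the crypto package
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	d := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")

	// Algorithm() yields digest.SHA256; Hash() stands in for the removed
	// newHash("sha256") helper.
	h := d.Algorithm().Hash()
	fmt.Printf("%s:%x\n", d.Algorithm(), h.Sum(nil)) // matches d for empty input
}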
@@ -80,7 +80,7 @@ func TestVerifierUnsupportedDigest(t *testing.T) {
 	}
 
 	if err != ErrDigestUnsupported {
-		t.Fatalf("incorrect error for unsupported digest: %v %p %p", err, ErrDigestUnsupported, err)
+		t.Fatalf("incorrect error for unsupported digest: %v", err)
 	}
 }
 
@@ -28,7 +28,7 @@ var (
 		Name:     "uuid",
 		Type:     "opaque",
 		Required: true,
-		Description: `A uuid identifying the upload. This field can accept almost anything.`,
+		Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.",
 	}
 
 	digestPathParameter = ParameterDescriptor{
@@ -135,7 +135,7 @@ const (
    "tag": <tag>,
    "fsLayers": [
       {
-         "blobSum": <tarsum>
+         "blobSum": "<digest>"
       },
       ...
    ]
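The <digest> placeholder that replaces <tarsum> here follows the "<algorithm>:<hex>" convention of the digest package. For illustration, a sha256 blobSum value for some layer content can be computed with the standard library alone (this snippet is not part of the commit):

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	layer := []byte("example layer bytes") // placeholder content
	// Prints a "<digest>" value of the form "sha256:<hex>".
	fmt.Printf("sha256:%x\n", sha256.Sum256(layer))
}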
@@ -606,7 +606,7 @@ var routeDescriptors = []RouteDescriptor{
             "code": "BLOB_UNKNOWN",
             "message": "blob unknown to registry",
             "detail": {
-                "digest": <tarsum>
+                "digest": "<digest>"
             }
         },
         ...
@@ -712,7 +712,7 @@ var routeDescriptors = []RouteDescriptor{
 		Name:        RouteNameBlob,
 		Path:        "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}",
 		Entity:      "Blob",
-		Description: "Fetch the blob identified by `name` and `digest`. Used to fetch layers by tarsum digest.",
+		Description: "Fetch the blob identified by `name` and `digest`. Used to fetch layers by digest.",
 		Methods: []MethodDescriptor{
 
 			{
@@ -898,7 +898,7 @@ var routeDescriptors = []RouteDescriptor{
 					{
 						Name:   "digest",
 						Type:   "query",
-						Format: "<tarsum>",
+						Format: "<digest>",
 						Regexp: digest.DigestRegexp,
 						Description: `Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`,
 					},
@@ -985,7 +985,7 @@ var routeDescriptors = []RouteDescriptor{
 
 	{
 		Name:        RouteNameBlobUploadChunk,
-		Path:        "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid}",
+		Path:        "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}",
 		Entity:      "Blob Upload",
 		Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.",
 		Methods: []MethodDescriptor{
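The registry wires these descriptors into gorilla/mux routes, so tightening {uuid} to {uuid:[a-zA-Z0-9-_.=]+} makes the router itself reject upload identifiers containing other characters. A standalone sketch of the same pattern (the handler and listen address are illustrative only, not code from this commit):

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// Matches UUIDs and urlsafe base64 values; anything else fails to route.
	r.HandleFunc("/v2/{name:.+}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}",
		func(w http.ResponseWriter, req *http.Request) {
			fmt.Fprintln(w, mux.Vars(req)["uuid"])
		})
	log.Fatal(http.ListenAndServe(":8080", r))
}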
@@ -1055,7 +1055,74 @@ var routeDescriptors = []RouteDescriptor{
 				Description: "Upload a chunk of data for the specified upload.",
 				Requests: []RequestDescriptor{
 					{
-						Description: "Upload a chunk of data to specified upload without completing the upload.",
+						Name:        "Stream upload",
+						Description: "Upload a stream of data to upload without completing the upload.",
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							uuidParameterDescriptor,
+						},
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						Body: BodyDescriptor{
+							ContentType: "application/octet-stream",
+							Format:      "<binary data>",
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Name:        "Data Accepted",
+								Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
+								StatusCode:  http.StatusNoContent,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Location",
+										Type:        "url",
+										Format:      "/v2/<name>/blobs/uploads/<uuid>",
+										Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+									},
+									{
+										Name:        "Range",
+										Type:        "header",
+										Format:      "0-<offset>",
+										Description: "Range indicating the current progress of the upload.",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was an error processing the upload and it must be restarted.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+									ErrorCodeBlobUploadInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponsePush,
+							{
+								Description: "The upload is unknown to the registry. The upload must be restarted.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []ErrorCode{
+									ErrorCodeBlobUploadUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+						},
+					},
+					{
+						Name:        "Chunked upload",
+						Description: "Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range.",
 						PathParameters: []ParameterDescriptor{
 							nameParameterDescriptor,
 							uuidParameterDescriptor,
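On the wire, the "Stream upload" request described above is a PATCH of the upload URL with an octet-stream body. A client-side sketch (the upload URL is a placeholder; a real client takes it verbatim from the Location header of a prior upload response):

package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholder URL; use the Location header from the upload start response.
	uploadURL := "http://registry.example.com/v2/foo/bar/blobs/uploads/some-uuid"

	req, err := http.NewRequest("PATCH", uploadURL, bytes.NewReader([]byte("layer data")))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// On success the registry answers 204 with updated Range and Location.
	fmt.Println(resp.StatusCode, resp.Header.Get("Range"), resp.Header.Get("Location"))
}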
@@ -1143,26 +1210,15 @@ var routeDescriptors = []RouteDescriptor{
 				Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.",
 				Requests: []RequestDescriptor{
 					{
-						// TODO(stevvooe): Break this down into three separate requests:
-						// 	1. Complete an upload where all data has already been sent.
-						// 	2. Complete an upload where the entire body is in the PUT.
-						// 	3. Complete an upload where the final, partial chunk is the body.
-
-						Description: "Complete the upload, providing the _final_ chunk of data, if necessary. This method may take a body with all the data. If the `Content-Range` header is specified, it may include the final chunk. A request without a body will just complete the upload with previously uploaded content.",
+						Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.",
 						Headers: []ParameterDescriptor{
 							hostHeader,
 							authHeader,
-							{
-								Name:        "Content-Range",
-								Type:        "header",
-								Format:      "<start of range>-<end of range, inclusive>",
-								Description: "Range of bytes identifying the block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header. May be omitted if no data is provided.",
-							},
 							{
 								Name:        "Content-Length",
 								Type:        "integer",
-								Format:      "<length of chunk>",
-								Description: "Length of the chunk being uploaded, corresponding to the length of the request body. May be zero if no data is provided.",
+								Format:      "<length of data>",
+								Description: "Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.",
 							},
 						},
 						PathParameters: []ParameterDescriptor{
@@ -1173,7 +1229,7 @@ var routeDescriptors = []RouteDescriptor{
 						{
 							Name:     "digest",
 							Type:     "string",
-							Format:   "<tarsum>",
+							Format:   "<digest>",
 							Regexp:   digest.DigestRegexp,
 							Required: true,
 							Description: `Digest of uploaded blob.`,
@@ -1181,7 +1237,7 @@ var routeDescriptors = []RouteDescriptor{
 						},
 						Body: BodyDescriptor{
 							ContentType: "application/octet-stream",
-							Format:      "<binary chunk>",
+							Format:      "<binary data>",
 						},
 						Successes: []ResponseDescriptor{
 							{
@@ -1190,9 +1246,10 @@ var routeDescriptors = []RouteDescriptor{
 								StatusCode: http.StatusNoContent,
 								Headers: []ParameterDescriptor{
 									{
 										Name:   "Location",
 										Type:   "url",
 										Format: "<blob location>",
+										Description: "The canonical location of the blob for retrieval",
 									},
 									{
 										Name: "Content-Range",
@@ -1200,12 +1257,7 @@ var routeDescriptors = []RouteDescriptor{
 										Format:      "<start of range>-<end of range, inclusive>",
 										Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.",
 									},
-									{
-										Name:        "Content-Length",
-										Type:        "integer",
-										Format:      "<length of chunk>",
-										Description: "Length of the chunk being uploaded, corresponding the length of the request body.",
-									},
+									contentLengthZeroHeader,
 									digestHeader,
 								},
 							},
@@ -1236,24 +1288,6 @@ var routeDescriptors = []RouteDescriptor{
 									Format:      errorsBody,
 								},
 							},
-							{
-								Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. The contents of the `Range` header may be used to resolve the condition.",
-								StatusCode:  http.StatusRequestedRangeNotSatisfiable,
-								Headers: []ParameterDescriptor{
-									{
-										Name:        "Location",
-										Type:        "url",
-										Format:      "/v2/<name>/blobs/uploads/<uuid>",
-										Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
-									},
-									{
-										Name:        "Range",
-										Type:        "header",
-										Format:      "0-<offset>",
-										Description: "Range indicating the current progress of the upload.",
-									},
-								},
-							},
 						},
 					},
 				},
@@ -46,7 +46,7 @@ var (
 	// ErrRepositoryNameComponentShort is returned when a repository name
 	// contains a component which is shorter than
 	// RepositoryNameComponentMinLength
-	ErrRepositoryNameComponentShort = fmt.Errorf("respository name component must be %v or more characters", RepositoryNameComponentMinLength)
+	ErrRepositoryNameComponentShort = fmt.Errorf("repository name component must be %v or more characters", RepositoryNameComponentMinLength)
 
 	// ErrRepositoryNameMissingComponents is returned when a repository name
 	// contains fewer than RepositoryNameMinComponents components
@@ -61,7 +61,7 @@ var (
 	ErrRepositoryNameComponentInvalid = fmt.Errorf("repository name component must match %q", RepositoryNameComponentRegexp.String())
 )
 
-// ValidateRespositoryName ensures the repository name is valid for use in the
+// ValidateRepositoryName ensures the repository name is valid for use in the
 // registry. This function accepts a superset of what might be accepted by
 // docker core or docker hub. If the name does not pass validation, an error,
 // describing the conditions, is returned.
@@ -75,7 +75,7 @@ var (
 //
 // The result of the production, known as the "namespace", should be limited
 // to 255 characters.
-func ValidateRespositoryName(name string) error {
+func ValidateRepositoryName(name string) error {
 	if len(name) > RepositoryNameTotalLengthMax {
 		return ErrRepositoryNameLong
 	}
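A usage sketch of the renamed function (the import path assumes the vendored distribution tree; the sample names are arbitrary):

package main

import (
	"fmt"

	"github.com/docker/distribution/registry/api/v2"
)

func main() {
	for _, name := range []string{"library/ubuntu", "UPPERCASE/bad"} {
		if err := v2.ValidateRepositoryName(name); err != nil {
			// Components with uppercase letters fail component validation.
			fmt.Printf("%s: %v\n", name, err)
			continue
		}
		fmt.Printf("%s: ok\n", name)
	}
}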
@@ -80,7 +80,7 @@ func TestRepositoryNameRegexp(t *testing.T) {
 			t.Fail()
 		}
 
-		if err := ValidateRespositoryName(testcase.input); err != testcase.err {
+		if err := ValidateRepositoryName(testcase.input); err != testcase.err {
 			if testcase.err != nil {
 				if err != nil {
 					failf("unexpected error for invalid repository: got %v, expected %v", err, testcase.err)
@@ -98,6 +98,7 @@ func TestRouter(t *testing.T) {
 			},
 		},
 		{
+			// support uuid proper
 			RouteName:  RouteNameBlobUploadChunk,
 			RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
 			Vars: map[string]string{
@@ -113,6 +114,21 @@ func TestRouter(t *testing.T) {
 				"uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==",
 			},
 		},
+		{
+			// supports urlsafe base64
+			RouteName:  RouteNameBlobUploadChunk,
+			RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==",
+			Vars: map[string]string{
+				"name": "foo/bar",
+				"uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==",
+			},
+		},
+		{
+			// does not match
+			RouteName:  RouteNameBlobUploadChunk,
+			RequestURI: "/v2/foo/bar/blobs/uploads/totalandcompletejunk++$$-==",
+			StatusCode: http.StatusNotFound,
+		},
 		{
 			// Check ambiguity: ensure we can distinguish between tags for
 			// "foo/bar/image/image" and image for "foo/bar/image" with tag
@@ -62,7 +62,12 @@ func NewURLBuilderFromRequest(r *http.Request) *URLBuilder {
 	host := r.Host
 	forwardedHost := r.Header.Get("X-Forwarded-Host")
 	if len(forwardedHost) > 0 {
-		host = forwardedHost
+		// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
+		// comma-separated list of hosts, to which each proxy appends the
+		// requested host. We want to grab the first from this comma-separated
+		// list.
+		hosts := strings.SplitN(forwardedHost, ",", 2)
+		host = strings.TrimSpace(hosts[0])
 	}
 
 	basePath := routeDescriptorsMap[RouteNameBase].Path
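The first-host extraction added above behaves like this small standalone example:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Each proxy appends its requested host, so take the first entry.
	forwardedHost := "first.example.com, proxy1.example.com"
	hosts := strings.SplitN(forwardedHost, ",", 2)
	fmt.Println(strings.TrimSpace(hosts[0])) // first.example.com
}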
@@ -151,6 +151,12 @@ func TestBuilderFromRequest(t *testing.T) {
 	forwardedProtoHeader := make(http.Header, 1)
 	forwardedProtoHeader.Set("X-Forwarded-Proto", "https")
 
+	forwardedHostHeader1 := make(http.Header, 1)
+	forwardedHostHeader1.Set("X-Forwarded-Host", "first.example.com")
+
+	forwardedHostHeader2 := make(http.Header, 1)
+	forwardedHostHeader2.Set("X-Forwarded-Host", "first.example.com, proxy1.example.com")
+
 	testRequests := []struct {
 		request *http.Request
 		base    string
@@ -163,6 +169,14 @@ func TestBuilderFromRequest(t *testing.T) {
 			request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader},
 			base:    "https://example.com",
 		},
+		{
+			request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader1},
+			base:    "http://first.example.com",
+		},
+		{
+			request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2},
+			base:    "http://first.example.com",
+		},
 	}
 
 	for _, tr := range testRequests {