Merge pull request #284 from tiborvass/19.03-revert-remove-legacy-registry
[19.03] Keep but deprecate registry v2 schema1 logic and revert to libtrust-key-based engine ID
commit b07f53d0a4
30 changed files with 571 additions and 972 deletions
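In short: the daemon's ID goes back to being the fingerprint of its libtrust trust key instead of a persisted random UUID, and schema1 manifest support stays in the tree but now emits a deprecation warning. A minimal sketch of the restored ID derivation (not part of the diff; it only mirrors the trustKey.PublicKey().KeyID() call reinstated in the NewDaemon hunks below):

package main

import (
    "fmt"

    "github.com/docker/libtrust"
)

func main() {
    // Generate an EC P-256 key, as daemon/trustkey.go does when no key
    // file exists yet, then print the fingerprint the daemon uses as its ID.
    key, err := libtrust.GenerateECP256PrivateKey()
    if err != nil {
        panic(err)
    }
    // Prints a 59-character fingerprint such as
    // QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF
    fmt.Println(key.PublicKey().KeyID())
}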
Dockerfile (12 changes)
@@ -51,6 +51,11 @@ RUN apt-get update && apt-get install -y \
    && make PREFIX=/build/ install-criu

FROM base AS registry
# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN set -x \
    && export GOPATH="$(mktemp -d)" \
@@ -58,6 +63,13 @@ RUN set -x \
    && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
    && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
        go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
    && case $(dpkg --print-architecture) in \
        amd64|ppc64*|s390x) \
            (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1"); \
            GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
            go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
        ;; \
    esac \
    && rm -rf "$GOPATH"
@@ -12,6 +12,8 @@ import (
const (
    // defaultShutdownTimeout is the default shutdown timeout for the daemon
    defaultShutdownTimeout = 15
    // defaultTrustKeyFile is the default filename for the trust key
    defaultTrustKeyFile = "key.json"
)

// installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon

@@ -424,6 +424,14 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
        conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile
    }

    if conf.TrustKeyPath == "" {
        daemonConfDir, err := getDaemonConfDir(conf.Root)
        if err != nil {
            return nil, err
        }
        conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile)
    }

    if flags.Changed("graph") && flags.Changed("data-root") {
        return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`)
    }

@@ -58,6 +58,10 @@ func setDefaultUmask() error {
    return nil
}

func getDaemonConfDir(_ string) (string, error) {
    return getDefaultDaemonConfigDir()
}

func (cli *DaemonCli) getPlatformContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) {
    opts := []supervisor.DaemonOpt{
        supervisor.WithOOMScore(cli.Config.OOMScoreAdjust),

@@ -5,6 +5,7 @@ import (
    "fmt"
    "net"
    "os"
    "path/filepath"
    "time"

    "github.com/docker/docker/daemon/config"

@@ -23,6 +24,10 @@ func setDefaultUmask() error {
    return nil
}

func getDaemonConfDir(root string) (string, error) {
    return filepath.Join(root, `\config`), nil
}

// preNotifySystem sends a message to the host when the API is active, but before the daemon is
func preNotifySystem() {
    // start the service now to prevent timeouts waiting for daemon to start

@@ -63,6 +63,8 @@ var flatOptions = map[string]bool{
var skipValidateOptions = map[string]bool{
    "features": true,
    "builder":  true,
    // Corresponding flag has been removed because it was already unusable
    "deprecated-key-path": true,
}

// skipDuplicates contains configuration keys that

@@ -134,6 +136,12 @@ type CommonConfig struct {
    SocketGroup string `json:"group,omitempty"`
    CorsHeaders string `json:"api-cors-header,omitempty"`

    // TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
    // when pushing to a registry which does not support schema 2. This field is marked as
    // deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
    // daemon ID will use a dedicated identifier not shared with exported signatures.
    TrustKeyPath string `json:"deprecated-key-path,omitempty"`

    // LiveRestoreEnabled determines whether we should keep containers
    // alive upon daemon shutdown/start
    LiveRestoreEnabled bool `json:"live-restore,omitempty"`
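Since the struct tag above renames the JSON key, an operator-supplied trust-key path must now be configured as "deprecated-key-path". A small self-contained sketch of how the field unmarshals (the mirror struct is illustrative only; /etc/docker/key.json matches the default location used in the tests later in this diff):

package main

import (
    "encoding/json"
    "fmt"
)

// cfg mirrors just the renamed field of CommonConfig, for illustration.
type cfg struct {
    TrustKeyPath string `json:"deprecated-key-path,omitempty"`
}

func main() {
    // A daemon.json fragment using the renamed key.
    raw := []byte(`{"deprecated-key-path": "/etc/docker/key.json"}`)
    var c cfg
    if err := json.Unmarshal(raw, &c); err != nil {
        panic(err)
    }
    fmt.Println(c.TrustKeyPath) // /etc/docker/key.json
}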
@@ -239,7 +247,6 @@ func New() *Config {
    config := Config{}
    config.LogConfig.Config = make(map[string]string)
    config.ClusterOpts = make(map[string]string)

    return &config
}

@@ -960,7 +960,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
        return nil, err
    }

    uuid, err := loadOrCreateUUID(filepath.Join(config.Root, "engine_uuid"))
    trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath)
    if err != nil {
        return nil, err
    }

@@ -1005,7 +1005,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
        return nil, errors.New("Devices cgroup isn't mounted")
    }

    d.ID = uuid
    d.ID = trustKey.PublicKey().KeyID()
    d.repository = daemonRepo
    d.containers = container.NewMemoryStore()
    if d.containersReplica, err = container.NewViewDB(); err != nil {

@@ -1036,6 +1036,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
        MaxConcurrentUploads: *config.MaxConcurrentUploads,
        ReferenceStore:       rs,
        RegistryService:      registryService,
        TrustKey:             trustKey,
    })

    go d.execCommandGC()

@@ -54,6 +54,7 @@ func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHea
        },
        ConfigMediaType: schema2.MediaTypeImageConfig,
        LayerStores:     distribution.NewLayerProvidersFromStores(i.layerStores),
        TrustKey:        i.trustKey,
        UploadManager:   i.uploadManager,
    }

@@ -14,6 +14,7 @@ import (
    "github.com/docker/docker/layer"
    dockerreference "github.com/docker/docker/reference"
    "github.com/docker/docker/registry"
    "github.com/docker/libtrust"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"

@@ -39,6 +40,7 @@ type ImageServiceConfig struct {
    MaxConcurrentUploads int
    ReferenceStore       dockerreference.Store
    RegistryService      registry.Service
    TrustKey             libtrust.PrivateKey
}

// NewImageService returns a new ImageService from a configuration

@@ -54,6 +56,7 @@ func NewImageService(config ImageServiceConfig) *ImageService {
        layerStores:     config.LayerStores,
        referenceStore:  config.ReferenceStore,
        registryService: config.RegistryService,
        trustKey:        config.TrustKey,
        uploadManager:   xfer.NewLayerUploadManager(config.MaxConcurrentUploads),
    }
}

@@ -69,6 +72,7 @@ type ImageService struct {
    pruneRunning    int32
    referenceStore  dockerreference.Store
    registryService registry.Service
    trustKey        libtrust.PrivateKey
    uploadManager   *xfer.LayerUploadManager
}
daemon/trustkey.go (new file, 57 lines)
@@ -0,0 +1,57 @@
package daemon // import "github.com/docker/docker/daemon"

import (
    "encoding/json"
    "encoding/pem"
    "fmt"
    "os"
    "path/filepath"

    "github.com/docker/docker/pkg/ioutils"
    "github.com/docker/docker/pkg/system"
    "github.com/docker/libtrust"
)

// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
// otherwise generates a new one
// TODO: this should use more of libtrust.LoadOrCreateTrustKey which may need
// a refactor or this function to be moved into libtrust
func loadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
    err := system.MkdirAll(filepath.Dir(trustKeyPath), 0755, "")
    if err != nil {
        return nil, err
    }
    trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
    if err == libtrust.ErrKeyFileDoesNotExist {
        trustKey, err = libtrust.GenerateECP256PrivateKey()
        if err != nil {
            return nil, fmt.Errorf("Error generating key: %s", err)
        }
        encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
        if err != nil {
            return nil, fmt.Errorf("Error serializing key: %s", err)
        }
        if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
            return nil, fmt.Errorf("Error saving key file: %s", err)
        }
    } else if err != nil {
        return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
    }
    return trustKey, nil
}

func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
    if ext == ".json" || ext == ".jwk" {
        encoded, err = json.Marshal(key)
        if err != nil {
            return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
        }
    } else {
        pemBlock, err := key.PEMBlock()
        if err != nil {
            return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
        }
        encoded = pem.EncodeToMemory(pemBlock)
    }
    return
}
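A short usage sketch (not part of the commit) of the contract the tests below verify: the first call generates and persists a key, later calls load the same key back. The path here is hypothetical:

package daemon

import "fmt"

func exampleLoadOrCreate() error {
    const path = "/tmp/engine-key.json"  // hypothetical location
    k1, err := loadOrCreateTrustKey(path) // generated and written on first call
    if err != nil {
        return err
    }
    k2, err := loadOrCreateTrustKey(path) // loaded back on subsequent calls
    if err != nil {
        return err
    }
    fmt.Println(k1.KeyID() == k2.KeyID()) // true: same key both times
    return nil
}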
daemon/trustkey_test.go (new file, 71 lines)
@@ -0,0 +1,71 @@
package daemon // import "github.com/docker/docker/daemon"

import (
    "io/ioutil"
    "os"
    "path/filepath"
    "testing"

    "gotest.tools/assert"
    is "gotest.tools/assert/cmp"
    "gotest.tools/fs"
)

// LoadOrCreateTrustKey
func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) {
    tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
    assert.NilError(t, err)
    defer os.RemoveAll(tmpKeyFolderPath)

    tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile")
    assert.NilError(t, err)

    _, err = loadOrCreateTrustKey(tmpKeyFile.Name())
    assert.Check(t, is.ErrorContains(err, "Error loading key file"))
}

func TestLoadOrCreateTrustKeyCreateKeyWhenFileDoesNotExist(t *testing.T) {
    tmpKeyFolderPath := fs.NewDir(t, "api-trustkey-test")
    defer tmpKeyFolderPath.Remove()

    // Without the need to create the folder hierarchy
    tmpKeyFile := tmpKeyFolderPath.Join("keyfile")

    key, err := loadOrCreateTrustKey(tmpKeyFile)
    assert.NilError(t, err)
    assert.Check(t, key != nil)

    _, err = os.Stat(tmpKeyFile)
    assert.NilError(t, err, "key file doesn't exist")
}

func TestLoadOrCreateTrustKeyCreateKeyWhenDirectoryDoesNotExist(t *testing.T) {
    tmpKeyFolderPath := fs.NewDir(t, "api-trustkey-test")
    defer tmpKeyFolderPath.Remove()
    tmpKeyFile := tmpKeyFolderPath.Join("folder/hierarchy/keyfile")

    key, err := loadOrCreateTrustKey(tmpKeyFile)
    assert.NilError(t, err)
    assert.Check(t, key != nil)

    _, err = os.Stat(tmpKeyFile)
    assert.NilError(t, err, "key file doesn't exist")
}

func TestLoadOrCreateTrustKeyCreateKeyNoPath(t *testing.T) {
    defer os.Remove("keyfile")
    key, err := loadOrCreateTrustKey("keyfile")
    assert.NilError(t, err)
    assert.Check(t, key != nil)

    _, err = os.Stat("keyfile")
    assert.NilError(t, err, "key file doesn't exist")
}

func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) {
    tmpKeyFile := filepath.Join("testdata", "keyfile")
    key, err := loadOrCreateTrustKey(tmpKeyFile)
    assert.NilError(t, err)
    expected := "AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY"
    assert.Check(t, is.Contains(key.String(), expected))
}
@@ -1,35 +0,0 @@
package daemon // import "github.com/docker/docker/daemon"

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"

    "github.com/docker/docker/pkg/ioutils"
    "github.com/google/uuid"
)

func loadOrCreateUUID(path string) (string, error) {
    err := os.MkdirAll(filepath.Dir(path), 0755)
    if err != nil {
        return "", err
    }
    var id string
    idb, err := ioutil.ReadFile(path)
    if os.IsNotExist(err) {
        id = uuid.New().String()
        if err := ioutils.AtomicWriteFile(path, []byte(id), os.FileMode(0600)); err != nil {
            return "", fmt.Errorf("Error saving uuid file: %s", err)
        }
    } else if err != nil {
        return "", fmt.Errorf("Error loading uuid file %s: %s", path, err)
    } else {
        idp, err := uuid.Parse(string(idb))
        if err != nil {
            return "", fmt.Errorf("Error parsing uuid in file %s: %s", path, err)
        }
        id = idp.String()
    }
    return id, nil
}
@@ -18,6 +18,7 @@ import (
    "github.com/docker/docker/pkg/system"
    refstore "github.com/docker/docker/reference"
    "github.com/docker/docker/registry"
    "github.com/docker/libtrust"
    "github.com/opencontainers/go-digest"
    specs "github.com/opencontainers/image-spec/specs-go/v1"
)

@@ -72,6 +73,9 @@ type ImagePushConfig struct {
    ConfigMediaType string
    // LayerStores (indexed by operating system) manages layers.
    LayerStores map[string]PushLayerProvider
    // TrustKey is the private key for legacy signatures. This is typically
    // an ephemeral key, since these signatures are no longer verified.
    TrustKey libtrust.PrivateKey
    // UploadManager dispatches uploads.
    UploadManager *xfer.LayerUploadManager
}

@@ -39,12 +39,7 @@ func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo,
            repoInfo: repoInfo,
        }, nil
    case registry.APIVersion1:
        return &v1Puller{
            v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore),
            endpoint:    endpoint,
            config:      imagePullConfig,
            repoInfo:    repoInfo,
        }, nil
        return nil, fmt.Errorf("protocol version %d no longer supported. Please contact admins of registry %s", endpoint.Version, endpoint.URL)
    }
    return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
}
@@ -1,365 +0,0 @@
package distribution // import "github.com/docker/docker/distribution"

import (
    "context"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "net"
    "net/url"
    "os"
    "strings"
    "time"

    "github.com/docker/distribution/reference"
    "github.com/docker/distribution/registry/client/transport"
    "github.com/docker/docker/distribution/metadata"
    "github.com/docker/docker/distribution/xfer"
    "github.com/docker/docker/dockerversion"
    "github.com/docker/docker/image"
    "github.com/docker/docker/image/v1"
    "github.com/docker/docker/layer"
    "github.com/docker/docker/pkg/ioutils"
    "github.com/docker/docker/pkg/progress"
    "github.com/docker/docker/pkg/stringid"
    "github.com/docker/docker/registry"
    specs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/sirupsen/logrus"
)

type v1Puller struct {
    v1IDService *metadata.V1IDService
    endpoint    registry.APIEndpoint
    config      *ImagePullConfig
    repoInfo    *registry.RepositoryInfo
    session     *registry.Session
}

func (p *v1Puller) Pull(ctx context.Context, ref reference.Named, _ *specs.Platform) error {
    if _, isCanonical := ref.(reference.Canonical); isCanonical {
        // Allowing fallback, because HTTPS v1 is before HTTP v2
        return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}}
    }

    tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name)
    if err != nil {
        return err
    }
    // Adds Docker-specific headers as well as user-specified headers (metaHeaders)
    tr := transport.NewTransport(
        // TODO(tiborvass): was ReceiveTimeout
        registry.NewTransport(tlsConfig),
        registry.Headers(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)...,
    )
    client := registry.HTTPClient(tr)
    v1Endpoint := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)
    p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
    if err != nil {
        // TODO(dmcgowan): Check if should fallback
        logrus.Debugf("Fallback from error: %s", err)
        return fallbackError{err: err}
    }
    if err := p.pullRepository(ctx, ref); err != nil {
        // TODO(dmcgowan): Check if should fallback
        return err
    }
    progress.Message(p.config.ProgressOutput, "", p.repoInfo.Name.Name()+": this image was pulled from a legacy registry. Important: This registry version will not be supported in future versions of docker.")

    return nil
}

func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error {
    progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.Name.Name())

    tagged, isTagged := ref.(reference.NamedTagged)

    repoData, err := p.session.GetRepositoryData(p.repoInfo.Name)
    if err != nil {
        if strings.Contains(err.Error(), "HTTP code: 404") {
            if isTagged {
                return fmt.Errorf("Error: image %s:%s not found", reference.Path(p.repoInfo.Name), tagged.Tag())
            }
            return fmt.Errorf("Error: image %s not found", reference.Path(p.repoInfo.Name))
        }
        // Unexpected HTTP error
        return err
    }

    logrus.Debug("Retrieving the tag list")
    var tagsList map[string]string
    if !isTagged {
        tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.Name)
    } else {
        var tagID string
        tagsList = make(map[string]string)
        tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.Name, tagged.Tag())
        if err == registry.ErrRepoNotFound {
            return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.Name.Name())
        }
        tagsList[tagged.Tag()] = tagID
    }
    if err != nil {
        logrus.Errorf("unable to get remote tags: %s", err)
        return err
    }

    for tag, id := range tagsList {
        repoData.ImgList[id] = &registry.ImgData{
            ID:       id,
            Tag:      tag,
            Checksum: "",
        }
    }

    layersDownloaded := false
    for _, imgData := range repoData.ImgList {
        if isTagged && imgData.Tag != tagged.Tag() {
            continue
        }

        err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded)
        if err != nil {
            return err
        }
    }

    writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded)
    return nil
}

func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error {
    if img.Tag == "" {
        logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
        return nil
    }

    localNameRef, err := reference.WithTag(p.repoInfo.Name, img.Tag)
    if err != nil {
        retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag)
        logrus.Debug(retErr.Error())
        return retErr
    }

    if err := v1.ValidateID(img.ID); err != nil {
        return err
    }

    progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.Name.Name())
    success := false
    var lastErr error
    for _, ep := range p.repoInfo.Index.Mirrors {
        ep += "v1/"
        progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.Name.Name(), ep))
        if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil {
            // Don't report errors when pulling from mirrors.
            logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err)
            continue
        }
        success = true
        break
    }
    if !success {
        for _, ep := range repoData.Endpoints {
            progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.Name.Name(), ep)
            if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil {
                // It's not ideal that only the last error is returned, it would be better to concatenate the errors.
                // As the error is also given to the output stream the user will see the error.
                lastErr = err
                progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err)
                continue
            }
            success = true
            break
        }
    }
    if !success {
        err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.Name.Name(), lastErr)
        progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error())
        return err
    }
    return nil
}

func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) {
    var history []string
    history, err = p.session.GetRemoteHistory(v1ID, endpoint)
    if err != nil {
        return err
    }
    if len(history) < 1 {
        return fmt.Errorf("empty history for image %s", v1ID)
    }
    progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers")

    var (
        descriptors []xfer.DownloadDescriptor
        newHistory  []image.History
        imgJSON     []byte
        imgSize     int64
    )

    // Iterate over layers, in order from bottom-most to top-most. Download
    // config for all layers and create descriptors.
    for i := len(history) - 1; i >= 0; i-- {
        v1LayerID := history[i]
        imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint)
        if err != nil {
            return err
        }

        // Create a new-style config from the legacy configs
        h, err := v1.HistoryFromConfig(imgJSON, false)
        if err != nil {
            return err
        }
        newHistory = append(newHistory, h)

        layerDescriptor := &v1LayerDescriptor{
            v1LayerID:        v1LayerID,
            indexName:        p.repoInfo.Index.Name,
            endpoint:         endpoint,
            v1IDService:      p.v1IDService,
            layersDownloaded: layersDownloaded,
            layerSize:        imgSize,
            session:          p.session,
        }

        descriptors = append(descriptors, layerDescriptor)
    }

    rootFS := image.NewRootFS()
    resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, "", descriptors, p.config.ProgressOutput)
    if err != nil {
        return err
    }
    defer release()

    config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory)
    if err != nil {
        return err
    }

    imageID, err := p.config.ImageStore.Put(config)
    if err != nil {
        return err
    }

    if p.config.ReferenceStore != nil {
        if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil {
            return err
        }
    }

    return nil
}

func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) {
    progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata")

    retries := 5
    for j := 1; j <= retries; j++ {
        imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint)
        if err != nil && j == retries {
            progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata")
            return nil, 0, err
        } else if err != nil {
            time.Sleep(time.Duration(j) * 500 * time.Millisecond)
            continue
        }

        return imgJSON, imgSize, nil
    }

    // not reached
    return nil, 0, nil
}

type v1LayerDescriptor struct {
    v1LayerID        string
    indexName        string
    endpoint         string
    v1IDService      *metadata.V1IDService
    layersDownloaded *bool
    layerSize        int64
    session          *registry.Session
    tmpFile          *os.File
}

func (ld *v1LayerDescriptor) Key() string {
    return "v1:" + ld.v1LayerID
}

func (ld *v1LayerDescriptor) ID() string {
    return stringid.TruncateID(ld.v1LayerID)
}

func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) {
    return ld.v1IDService.Get(ld.v1LayerID, ld.indexName)
}

func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
    progress.Update(progressOutput, ld.ID(), "Pulling fs layer")
    layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize)
    if err != nil {
        progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers")
        if uerr, ok := err.(*url.Error); ok {
            err = uerr.Err
        }
        if terr, ok := err.(net.Error); ok && terr.Timeout() {
            return nil, 0, err
        }
        return nil, 0, xfer.DoNotRetry{Err: err}
    }
    *ld.layersDownloaded = true

    ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob")
    if err != nil {
        layerReader.Close()
        return nil, 0, err
    }

    reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading")
    defer reader.Close()

    _, err = io.Copy(ld.tmpFile, reader)
    if err != nil {
        ld.Close()
        return nil, 0, err
    }

    progress.Update(progressOutput, ld.ID(), "Download complete")

    logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name())

    ld.tmpFile.Seek(0, 0)

    // hand off the temporary file to the download manager, so it will only
    // be closed once
    tmpFile := ld.tmpFile
    ld.tmpFile = nil

    return ioutils.NewReadCloserWrapper(tmpFile, func() error {
        tmpFile.Close()
        err := os.RemoveAll(tmpFile.Name())
        if err != nil {
            logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
        }
        return err
    }), ld.layerSize, nil
}

func (ld *v1LayerDescriptor) Close() {
    if ld.tmpFile != nil {
        ld.tmpFile.Close()
        if err := os.RemoveAll(ld.tmpFile.Name()); err != nil {
            logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
        }
        ld.tmpFile = nil
    }
}

func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) {
    // Cache mapping from this layer's DiffID to the blobsum
    ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID)
}
@@ -23,7 +23,7 @@ import (
    "github.com/docker/docker/distribution/metadata"
    "github.com/docker/docker/distribution/xfer"
    "github.com/docker/docker/image"
    "github.com/docker/docker/image/v1"
    v1 "github.com/docker/docker/image/v1"
    "github.com/docker/docker/layer"
    "github.com/docker/docker/pkg/ioutils"
    "github.com/docker/docker/pkg/progress"

@@ -392,6 +392,10 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, platform
        if p.config.RequireSchema2 {
            return false, fmt.Errorf("invalid manifest: not schema2")
        }
        msg := schema1DeprecationMessage(ref)
        logrus.Warn(msg)
        progress.Message(p.config.ProgressOutput, "", msg)

        id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform)
        if err != nil {
            return false, err

@@ -787,6 +791,10 @@ func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mf

    switch v := manifest.(type) {
    case *schema1.SignedManifest:
        msg := schema1DeprecationMessage(ref)
        logrus.Warn(msg)
        progress.Message(p.config.ProgressOutput, "", msg)

        platform := toOCIPlatform(manifestMatches[0].Platform)
        id, _, err = p.pullSchema1(ctx, manifestRef, v, &platform)
        if err != nil {
@@ -41,13 +41,7 @@ func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *reg
            config: imagePushConfig,
        }, nil
    case registry.APIVersion1:
        return &v1Pusher{
            v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore),
            ref:         ref,
            endpoint:    endpoint,
            repoInfo:    repoInfo,
            config:      imagePushConfig,
        }, nil
        return nil, fmt.Errorf("protocol version %d no longer supported. Please contact admins of registry %s", endpoint.Version, endpoint.URL)
    }
    return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
}
@@ -1,457 +0,0 @@
package distribution // import "github.com/docker/docker/distribution"

import (
    "context"
    "fmt"
    "sync"

    "github.com/docker/distribution/reference"
    "github.com/docker/distribution/registry/client/transport"
    "github.com/docker/docker/distribution/metadata"
    "github.com/docker/docker/dockerversion"
    "github.com/docker/docker/image"
    "github.com/docker/docker/image/v1"
    "github.com/docker/docker/layer"
    "github.com/docker/docker/pkg/ioutils"
    "github.com/docker/docker/pkg/progress"
    "github.com/docker/docker/pkg/stringid"
    "github.com/docker/docker/pkg/system"
    "github.com/docker/docker/registry"
    "github.com/opencontainers/go-digest"
    "github.com/sirupsen/logrus"
)

type v1Pusher struct {
    v1IDService *metadata.V1IDService
    endpoint    registry.APIEndpoint
    ref         reference.Named
    repoInfo    *registry.RepositoryInfo
    config      *ImagePushConfig
    session     *registry.Session
}

func (p *v1Pusher) Push(ctx context.Context) error {
    tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name)
    if err != nil {
        return err
    }
    // Adds Docker-specific headers as well as user-specified headers (metaHeaders)
    tr := transport.NewTransport(
        // TODO(tiborvass): was NoTimeout
        registry.NewTransport(tlsConfig),
        registry.Headers(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)...,
    )
    client := registry.HTTPClient(tr)
    v1Endpoint := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)
    p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
    if err != nil {
        // TODO(dmcgowan): Check if should fallback
        return fallbackError{err: err}
    }
    if err := p.pushRepository(ctx); err != nil {
        // TODO(dmcgowan): Check if should fallback
        return err
    }
    return nil
}

// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an
// image being pushed to a v1 registry.
type v1Image interface {
    Config() []byte
    Layer() layer.Layer
    V1ID() string
}

type v1ImageCommon struct {
    layer  layer.Layer
    config []byte
    v1ID   string
}

func (common *v1ImageCommon) Config() []byte {
    return common.config
}

func (common *v1ImageCommon) V1ID() string {
    return common.v1ID
}

func (common *v1ImageCommon) Layer() layer.Layer {
    return common.layer
}

// v1TopImage defines a runnable (top layer) image being pushed to a v1
// registry.
type v1TopImage struct {
    v1ImageCommon
    imageID image.ID
}

func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) {
    v1ID := imageID.Digest().Hex()
    parentV1ID := ""
    if parent != nil {
        parentV1ID = parent.V1ID()
    }

    config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false)
    if err != nil {
        return nil, err
    }

    return &v1TopImage{
        v1ImageCommon: v1ImageCommon{
            v1ID:   v1ID,
            config: config,
            layer:  l,
        },
        imageID: imageID,
    }, nil
}

// v1DependencyImage defines a dependency layer being pushed to a v1 registry.
type v1DependencyImage struct {
    v1ImageCommon
}

func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) *v1DependencyImage {
    v1ID := digest.Digest(l.ChainID()).Hex()

    var config string
    if parent != nil {
        config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID())
    } else {
        config = fmt.Sprintf(`{"id":"%s"}`, v1ID)
    }
    return &v1DependencyImage{
        v1ImageCommon: v1ImageCommon{
            v1ID:   v1ID,
            config: []byte(config),
            layer:  l,
        },
    }
}

// Retrieve the all the images to be uploaded in the correct order
func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []PushLayer, err error) {
    tagsByImage = make(map[image.ID][]string)

    // Ignore digest references
    if _, isCanonical := p.ref.(reference.Canonical); isCanonical {
        return
    }

    tagged, isTagged := p.ref.(reference.NamedTagged)
    if isTagged {
        // Push a specific tag
        var imgID image.ID
        var dgst digest.Digest
        dgst, err = p.config.ReferenceStore.Get(p.ref)
        if err != nil {
            return
        }
        imgID = image.IDFromDigest(dgst)

        imageList, err = p.imageListForTag(imgID, nil, &referencedLayers)
        if err != nil {
            return
        }

        tagsByImage[imgID] = []string{tagged.Tag()}

        return
    }

    imagesSeen := make(map[digest.Digest]struct{})
    dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage)

    associations := p.config.ReferenceStore.ReferencesByName(p.ref)
    for _, association := range associations {
        if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged {
            // Ignore digest references.
            continue
        }

        imgID := image.IDFromDigest(association.ID)
        tagsByImage[imgID] = append(tagsByImage[imgID], tagged.Tag())

        if _, present := imagesSeen[association.ID]; present {
            // Skip generating image list for already-seen image
            continue
        }
        imagesSeen[association.ID] = struct{}{}

        imageListForThisTag, err := p.imageListForTag(imgID, dependenciesSeen, &referencedLayers)
        if err != nil {
            return nil, nil, nil, err
        }

        // append to main image list
        imageList = append(imageList, imageListForThisTag...)
    }
    if len(imageList) == 0 {
        return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag")
    }
    logrus.Debugf("Image list: %v", imageList)
    logrus.Debugf("Tags by image: %v", tagsByImage)

    return
}

func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]PushLayer) (imageListForThisTag []v1Image, err error) {
    ics, ok := p.config.ImageStore.(*imageConfigStore)
    if !ok {
        return nil, fmt.Errorf("only image store images supported for v1 push")
    }
    img, err := ics.Store.Get(imgID)
    if err != nil {
        return nil, err
    }

    topLayerID := img.RootFS.ChainID()

    if !system.IsOSSupported(img.OperatingSystem()) {
        return nil, system.ErrNotSupportedOperatingSystem
    }
    pl, err := p.config.LayerStores[img.OperatingSystem()].Get(topLayerID)
    *referencedLayers = append(*referencedLayers, pl)
    if err != nil {
        return nil, fmt.Errorf("failed to get top layer from image: %v", err)
    }

    // V1 push is deprecated, only support existing layerstore layers
    lsl, ok := pl.(*storeLayer)
    if !ok {
        return nil, fmt.Errorf("only layer store layers supported for v1 push")
    }
    l := lsl.Layer

    dependencyImages, parent := generateDependencyImages(l.Parent(), dependenciesSeen)

    topImage, err := newV1TopImage(imgID, img, l, parent)
    if err != nil {
        return nil, err
    }

    imageListForThisTag = append(dependencyImages, topImage)

    return
}

func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage) {
    if l == nil {
        return nil, nil
    }

    imageListForThisTag, parent = generateDependencyImages(l.Parent(), dependenciesSeen)

    if dependenciesSeen != nil {
        if dependencyImage, present := dependenciesSeen[l.ChainID()]; present {
            // This layer is already on the list, we can ignore it
            // and all its parents.
            return imageListForThisTag, dependencyImage
        }
    }

    dependencyImage := newV1DependencyImage(l, parent)
    imageListForThisTag = append(imageListForThisTag, dependencyImage)

    if dependenciesSeen != nil {
        dependenciesSeen[l.ChainID()] = dependencyImage
    }

    return imageListForThisTag, dependencyImage
}

// createImageIndex returns an index of an image's layer IDs and tags.
func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData {
    var imageIndex []*registry.ImgData
    for _, img := range images {
        v1ID := img.V1ID()

        if topImage, isTopImage := img.(*v1TopImage); isTopImage {
            if tags, hasTags := tags[topImage.imageID]; hasTags {
                // If an image has tags you must add an entry in the image index
                // for each tag
                for _, tag := range tags {
                    imageIndex = append(imageIndex, &registry.ImgData{
                        ID:  v1ID,
                        Tag: tag,
                    })
                }
                continue
            }
        }

        // If the image does not have a tag it still needs to be sent to the
        // registry with an empty tag so that it is associated with the repository
        imageIndex = append(imageIndex, &registry.ImgData{
            ID:  v1ID,
            Tag: "",
        })
    }
    return imageIndex
}

// lookupImageOnEndpoint checks the specified endpoint to see if an image exists
// and if it is absent then it sends the image id to the channel to be pushed.
func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) {
    defer wg.Done()
    for image := range images {
        v1ID := image.V1ID()
        truncID := stringid.TruncateID(image.Layer().DiffID().String())
        if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil {
            logrus.Errorf("Error in LookupRemoteImage: %s", err)
            imagesToPush <- v1ID
            progress.Update(p.config.ProgressOutput, truncID, "Waiting")
        } else {
            progress.Update(p.config.ProgressOutput, truncID, "Already exists")
        }
    }
}

func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error {
    workerCount := len(imageList)
    // start a maximum of 5 workers to check if images exist on the specified endpoint.
    if workerCount > 5 {
        workerCount = 5
    }
    var (
        wg           = &sync.WaitGroup{}
        imageData    = make(chan v1Image, workerCount*2)
        imagesToPush = make(chan string, workerCount*2)
        pushes       = make(chan map[string]struct{}, 1)
    )
    for i := 0; i < workerCount; i++ {
        wg.Add(1)
        go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush)
    }
    // start a go routine that consumes the images to push
    go func() {
        shouldPush := make(map[string]struct{})
        for id := range imagesToPush {
            shouldPush[id] = struct{}{}
        }
        pushes <- shouldPush
    }()
    for _, v1Image := range imageList {
        imageData <- v1Image
    }
    // close the channel to notify the workers that there will be no more images to check.
    close(imageData)
    wg.Wait()
    close(imagesToPush)
    // wait for all the images that require pushes to be collected into a consumable map.
    shouldPush := <-pushes
    // finish by pushing any images and tags to the endpoint. The order that the images are pushed
    // is very important that is why we are still iterating over the ordered list of imageIDs.
    for _, img := range imageList {
        v1ID := img.V1ID()
        if _, push := shouldPush[v1ID]; push {
            if _, err := p.pushImage(ctx, img, endpoint); err != nil {
                // FIXME: Continue on error?
                return err
            }
        }
        if topImage, isTopImage := img.(*v1TopImage); isTopImage {
            for _, tag := range tags[topImage.imageID] {
                progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+reference.Path(p.repoInfo.Name)+"/tags/"+tag)
                if err := p.session.PushRegistryTag(p.repoInfo.Name, v1ID, tag, endpoint); err != nil {
                    return err
                }
            }
        }
    }
    return nil
}

// pushRepository pushes layers that do not already exist on the registry.
func (p *v1Pusher) pushRepository(ctx context.Context) error {
    imgList, tags, referencedLayers, err := p.getImageList()
    defer func() {
        for _, l := range referencedLayers {
            l.Release()
        }
    }()
    if err != nil {
        return err
    }

    imageIndex := createImageIndex(imgList, tags)
    for _, data := range imageIndex {
        logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
    }

    // Register all the images in a repository with the registry
    // If an image is not in this list it will not be associated with the repository
    repoData, err := p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, false, nil)
    if err != nil {
        return err
    }
    // push the repository to each of the endpoints only if it does not exist.
    for _, endpoint := range repoData.Endpoints {
        if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil {
            return err
        }
    }
    _, err = p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, true, repoData.Endpoints)
    return err
}

func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) {
    l := v1Image.Layer()
    v1ID := v1Image.V1ID()
    truncID := stringid.TruncateID(l.DiffID().String())

    jsonRaw := v1Image.Config()
    progress.Update(p.config.ProgressOutput, truncID, "Pushing")

    // General rule is to use ID for graph accesses and compatibilityID for
    // calls to session.registry()
    imgData := &registry.ImgData{
        ID: v1ID,
    }

    // Send the json
    if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
        if err == registry.ErrAlreadyExists {
            progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping")
            return "", nil
        }
        return "", err
    }

    arch, err := l.TarStream()
    if err != nil {
        return "", err
    }
    defer arch.Close()

    // don't care if this fails; best effort
    size, _ := l.DiffSize()

    // Send the layer
    logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size)

    reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing")
    defer reader.Close()

    checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw)
    if err != nil {
        return "", err
    }
    imgData.Checksum = checksum
    imgData.ChecksumPayload = checksumPayload
    // Send the checksum
    if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil {
        return "", err
    }

    if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil {
        logrus.Warnf("Could not set v1 ID mapping: %v", err)
    }

    progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed")
    return imgData.Checksum, nil
}
@@ -5,6 +5,7 @@ import (
    "errors"
    "fmt"
    "io"
    "runtime"
    "sort"
    "strings"
    "sync"

@@ -180,8 +181,30 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id

    putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())}
    if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
        logrus.Warnf("failed to upload schema2 manifest: %v", err)
        return err
        if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 {
            logrus.Warnf("failed to upload schema2 manifest: %v", err)
            return err
        }

        logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err)

        msg := schema1DeprecationMessage(ref)
        logrus.Warn(msg)
        progress.Message(p.config.ProgressOutput, "", msg)

        manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag())
        if err != nil {
            return err
        }
        builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig)
        manifest, err = manifestFromBuilder(ctx, builder, descriptors)
        if err != nil {
            return err
        }

        if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
            return err
        }
    }

    var canonicalManifest []byte
@@ -156,3 +156,7 @@ func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[s
    req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token))
    return nil
}

func schema1DeprecationMessage(ref reference.Named) string {
    return fmt.Sprintf("[DEPRECATION NOTICE] registry v2 schema1 support will be removed in an upcoming release. Please contact admins of the %s registry NOW to avoid future disruption.", reference.Domain(ref))
}
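For reference, a runnable sketch (not part of the commit) of the warning a user sees when schema1 is involved; the registry domain is a placeholder and the format string is copied verbatim from the helper above:

package main

import (
    "fmt"

    "github.com/docker/distribution/reference"
)

func main() {
    ref, err := reference.ParseNormalizedNamed("registry.example.com/foo/bar:latest")
    if err != nil {
        panic(err)
    }
    // schema1DeprecationMessage is unexported, so its format string is inlined here.
    fmt.Printf("[DEPRECATION NOTICE] registry v2 schema1 support will be removed in an upcoming release. Please contact admins of the %s registry NOW to avoid future disruption.\n", reference.Domain(ref))
}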
@@ -14,4 +14,4 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
. ${SCRIPTDIR}/toml
. ${SCRIPTDIR}/changelog-well-formed
. ${SCRIPTDIR}/changelog-date-descending
. ${SCRIPTDIR}/deprecate-integration-cli
#. ${SCRIPTDIR}/deprecate-integration-cli
@@ -141,6 +141,39 @@ func (s *DockerRegistrySuite) TearDownTest(c *check.C) {
    s.ds.TearDownTest(c)
}

func init() {
    check.Suite(&DockerSchema1RegistrySuite{
        ds: &DockerSuite{},
    })
}

type DockerSchema1RegistrySuite struct {
    ds  *DockerSuite
    reg *registry.V2
    d   *daemon.Daemon
}

func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) {
    s.d.DumpStackAndQuit()
}

func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) {
    testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64, testEnv.IsLocalDaemon)
    s.reg = registry.NewV2(c, registry.Schema1)
    s.reg.WaitReady(c)
    s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution))
}

func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) {
    if s.reg != nil {
        s.reg.Close()
    }
    if s.d != nil {
        s.d.Stop(c)
    }
    s.ds.TearDownTest(c)
}

func init() {
    check.Suite(&DockerRegistryAuthHtpasswdSuite{
        ds: &DockerSuite{},
@ -3,9 +3,12 @@ package main
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/integration-cli/checker"
|
||||
|
@ -77,6 +80,10 @@ func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
|
|||
testPullByTagDisplaysDigest(c)
|
||||
}
|
||||
|
||||
func (s *DockerSchema1RegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
|
||||
testPullByTagDisplaysDigest(c)
|
||||
}
|
||||
|
||||
func testPullByDigest(c *check.C) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
pushDigest, err := setupImage(c)
|
||||
|
@ -99,6 +106,10 @@ func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) {
|
|||
testPullByDigest(c)
|
||||
}
|
||||
|
||||
func (s *DockerSchema1RegistrySuite) TestPullByDigest(c *check.C) {
|
||||
testPullByDigest(c)
|
||||
}
|
||||
|
||||
func testPullByDigestNoFallback(c *check.C) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
// pull from the registry using the <name>@<digest> reference
|
||||
|
@ -112,6 +123,10 @@ func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) {
|
|||
testPullByDigestNoFallback(c)
|
||||
}
|
||||
|
||||
func (s *DockerSchema1RegistrySuite) TestPullByDigestNoFallback(c *check.C) {
|
||||
testPullByDigestNoFallback(c)
|
||||
}
|
||||
|
||||
func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) {
|
||||
pushDigest, err := setupImage(c)
|
||||
assert.NilError(c, err, "error setting up image")
|
||||
|
@ -546,3 +561,131 @@ func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) {
|
|||
expectedErrorMsg := fmt.Sprintf("manifest verification failed for digest %s", manifestDigest)
|
||||
assert.Assert(c, is.Contains(out, expectedErrorMsg))
|
||||
}
|
||||
|
||||
// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when
|
||||
// we have modified a manifest blob and its digest cannot be verified.
|
||||
// This is the schema1 version of the test.
|
||||
func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
manifestDigest, err := setupImage(c)
|
||||
c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
|
||||
|
||||
// Load the target manifest blob.
|
||||
manifestBlob := s.reg.ReadBlobContents(c, manifestDigest)
|
||||
|
||||
var imgManifest schema1.Manifest
|
||||
err = json.Unmarshal(manifestBlob, &imgManifest)
|
||||
c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob"))
|
||||
|
||||
// Change a layer in the manifest.
|
||||
imgManifest.FSLayers[0] = schema1.FSLayer{
|
||||
BlobSum: digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"),
|
||||
}
|
||||
|
||||
// Move the existing data file aside, so that we can replace it with a
|
||||
// malicious blob of data. NOTE: we defer the returned undo func.
|
||||
undo := s.reg.TempMoveBlobData(c, manifestDigest)
|
||||
defer undo()
|
||||
|
||||
alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ")
|
||||
c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON"))
|
||||
|
||||
s.reg.WriteBlobContents(c, manifestDigest, alteredManifestBlob)
|
||||
|
||||
// Now try pulling that image by digest. We should get an error about
|
||||
// digest verification for the manifest digest.
|
||||
|
||||
// Pull from the registry using the <name>@<digest> reference.
|
||||
imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest)
|
||||
out, exitStatus, _ := dockerCmdWithError("pull", imageReference)
|
||||
c.Assert(exitStatus, checker.Not(check.Equals), 0)
|
||||
|
||||
expectedErrorMsg := fmt.Sprintf("image verification failed for digest %s", manifestDigest)
|
||||
c.Assert(out, checker.Contains, expectedErrorMsg)
|
||||
}
|
||||
|
||||
// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when
|
||||
// we have modified a layer blob and its digest cannot be verified.
|
||||
// This is the schema2 version of the test.
|
||||
func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) {
|
||||
testRequires(c, DaemonIsLinux)
|
||||
manifestDigest, err := setupImage(c)
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
// Load the target manifest blob.
|
||||
manifestBlob := s.reg.ReadBlobContents(c, manifestDigest)
|
||||
|
||||
var imgManifest schema2.Manifest
|
||||
err = json.Unmarshal(manifestBlob, &imgManifest)
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
// Next, get the digest of one of the layers from the manifest.
|
||||
targetLayerDigest := imgManifest.Layers[0].Digest
|
||||
|
||||
// Move the existing data file aside, so that we can replace it with a
|
||||
// malicious blob of data. NOTE: we defer the returned undo func.
|
||||
undo := s.reg.TempMoveBlobData(c, targetLayerDigest)
|
||||
defer undo()
|
||||
|
||||
// Now make a fake data blob in this directory.
|
||||
s.reg.WriteBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for."))
|
||||
|
||||
// Now try pulling that image by digest. We should get an error about
|
||||
// digest verification for the target layer digest.
|
||||
|
||||
// Remove distribution cache to force a re-pull of the blobs
|
||||
if err := os.RemoveAll(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "image", s.d.StorageDriver(), "distribution")); err != nil {
|
||||
c.Fatalf("error clearing distribution cache: %v", err)
|
||||
}
|
||||
|
||||
// Pull from the registry using the <name>@<digest> reference.
|
||||
imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest)
|
||||
out, exitStatus, _ := dockerCmdWithError("pull", imageReference)
|
||||
c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status"))
|
||||
|
||||
expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest)
|
||||
c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out))
|
||||
}

// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when
// we have modified a layer blob and its digest cannot be verified.
// This is the schema1 version of the test.
func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) {
	testRequires(c, DaemonIsLinux)
	manifestDigest, err := setupImage(c)
	c.Assert(err, checker.IsNil)

	// Load the target manifest blob.
	manifestBlob := s.reg.ReadBlobContents(c, manifestDigest)

	var imgManifest schema1.Manifest
	err = json.Unmarshal(manifestBlob, &imgManifest)
	c.Assert(err, checker.IsNil)

	// Next, get the digest of one of the layers from the manifest.
	targetLayerDigest := imgManifest.FSLayers[0].BlobSum

	// Move the existing data file aside, so that we can replace it with a
	// malicious blob of data. NOTE: we defer the returned undo func.
	undo := s.reg.TempMoveBlobData(c, targetLayerDigest)
	defer undo()

	// Now make a fake data blob in this directory.
	s.reg.WriteBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for."))

	// Now try pulling that image by digest. We should get an error about
	// digest verification for the target layer digest.

	// Remove distribution cache to force a re-pull of the blobs
	if err := os.RemoveAll(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "image", s.d.StorageDriver(), "distribution")); err != nil {
		c.Fatalf("error clearing distribution cache: %v", err)
	}

	// Pull from the registry using the <name>@<digest> reference.
	imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest)
	out, exitStatus, _ := dockerCmdWithError("pull", imageReference)
	c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status"))

	expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest)
	c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out))
}

@@ -35,6 +35,7 @@ import (
	"github.com/docker/docker/pkg/mount"
	"github.com/docker/go-units"
	"github.com/docker/libnetwork/iptables"
	"github.com/docker/libtrust"
	"github.com/go-check/check"
	"github.com/kr/pty"
	"golang.org/x/sys/unix"

@@ -550,6 +551,23 @@ func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) {
	}
}

func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) {
	// TODO: skip or update for Windows daemon
	os.Remove("/etc/docker/key.json")
	s.d.Start(c)
	s.d.Stop(c)

	k, err := libtrust.LoadKeyFile("/etc/docker/key.json")
	if err != nil {
		c.Fatalf("Error opening key file: %s", err)
	}
	kid := k.KeyID()
	// Test Key ID is a valid fingerprint (e.g. QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF)
	if len(kid) != 59 {
		c.Fatalf("Bad key ID: %s", kid)
	}
}
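The 59-character check above matches the libtrust fingerprint format: twelve 4-character groups joined by eleven colons (12*4 + 11 = 59). A hedged sketch of creating and reloading such a key outside the test harness (the /tmp path is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	if err := libtrust.SaveKey("/tmp/key.json", key); err != nil {
		log.Fatal(err)
	}

	loaded, err := libtrust.LoadKeyFile("/tmp/key.json")
	if err != nil {
		log.Fatal(err)
	}
	// KeyID() returns the fingerprint the daemon uses as its engine ID, e.g.
	// QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF
	kid := loaded.KeyID()
	fmt.Println(kid, len(kid) == 59)
}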

// GH#11320 - verify that the daemon exits on failure properly
// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means
// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required

@@ -1174,6 +1192,59 @@ func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) {
	}
}

func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) {
	type Config struct {
		Crv string `json:"crv"`
		D   string `json:"d"`
		Kid string `json:"kid"`
		Kty string `json:"kty"`
		X   string `json:"x"`
		Y   string `json:"y"`
	}

	os.Remove("/etc/docker/key.json")
	s.d.Start(c)
	s.d.Stop(c)

	config := &Config{}
	bytes, err := ioutil.ReadFile("/etc/docker/key.json")
	if err != nil {
		c.Fatalf("Error reading key.json file: %s", err)
	}

	// unmarshal the generated key file into the struct
	if err := json.Unmarshal(bytes, &config); err != nil {
		c.Fatalf("Error Unmarshal: %s", err)
	}

	// replace config.Kid with a fake value
	config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4"

	// marshal the modified struct back to bytes
	newBytes, err := json.Marshal(&config)
	if err != nil {
		c.Fatalf("Error Marshal: %s", err)
	}

	// write back
	if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil {
		c.Fatalf("Error ioutil.WriteFile: %s", err)
	}

	defer os.Remove("/etc/docker/key.json")

	if err := s.d.StartWithError(); err == nil {
		c.Fatalf("the daemon should not start with a tampered key")
	}

	content, err := s.d.ReadLogFile()
	c.Assert(err, checker.IsNil)

	if !strings.Contains(string(content), "Public Key ID does not match") {
		c.Fatalf("Missing KeyID message from daemon logs: %s", string(content))
	}
}
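The "Public Key ID does not match" log line comes from the daemon noticing that the kid declared in key.json no longer matches the fingerprint derived from the key material itself. A sketch of that consistency check, assuming libtrust's JWK helpers (this is not the daemon's actual code path, and libtrust may already reject the mismatched key at parse time):

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	raw, err := ioutil.ReadFile("/etc/docker/key.json")
	if err != nil {
		log.Fatal(err)
	}

	// The ID the file claims to have.
	var jwk struct {
		Kid string `json:"kid"`
	}
	if err := json.Unmarshal(raw, &jwk); err != nil {
		log.Fatal(err)
	}

	// The ID recomputed from the key material.
	key, err := libtrust.UnmarshalPrivateKeyJWK(raw)
	if err != nil {
		log.Fatal(err) // a tampered kid may already fail here
	}

	if key.KeyID() != jwk.Kid {
		fmt.Println("Public Key ID does not match") // the message the test greps for
	}
}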

func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) {
	s.d.StartWithBusybox(c)

@@ -56,6 +56,10 @@ func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) {
	testPullImageWithAliases(c)
}

func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *check.C) {
	testPullImageWithAliases(c)
}

// testConcurrentPullWholeRepo pulls the same repo concurrently.
func testConcurrentPullWholeRepo(c *check.C) {
	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)

@@ -108,6 +112,10 @@ func (s *DockerRegistrySuite) TestConcurrentPullWholeRepo(c *check.C) {
	testConcurrentPullWholeRepo(c)
}

func (s *DockerSchema1RegistrySuite) TestConcurrentPullWholeRepo(c *check.C) {
	testConcurrentPullWholeRepo(c)
}

// testConcurrentFailingPull tries a concurrent pull that doesn't succeed.
func testConcurrentFailingPull(c *check.C) {
	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)

@@ -135,6 +143,10 @@ func (s *DockerRegistrySuite) TestConcurrentFailingPull(c *check.C) {
	testConcurrentFailingPull(c)
}

func (s *DockerSchema1RegistrySuite) TestConcurrentFailingPull(c *check.C) {
	testConcurrentFailingPull(c)
}

// testConcurrentPullMultipleTags pulls multiple tags from the same repo
// concurrently.
func testConcurrentPullMultipleTags(c *check.C) {

@@ -187,6 +199,10 @@ func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) {
	testConcurrentPullMultipleTags(c)
}

func (s *DockerSchema1RegistrySuite) TestConcurrentPullMultipleTags(c *check.C) {
	testConcurrentPullMultipleTags(c)
}

// testPullIDStability verifies that pushing an image and pulling it back
// preserves the image ID.
func testPullIDStability(c *check.C) {

@@ -244,6 +260,10 @@ func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) {
	testPullIDStability(c)
}

func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) {
	testPullIDStability(c)
}

// #21213
func testPullNoLayers(c *check.C) {
	repoName := fmt.Sprintf("%v/dockercli/scratch", privateRegistryURL)

@@ -260,6 +280,10 @@ func (s *DockerRegistrySuite) TestPullNoLayers(c *check.C) {
	testPullNoLayers(c)
}

func (s *DockerSchema1RegistrySuite) TestPullNoLayers(c *check.C) {
	testPullNoLayers(c)
}

func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) {
	testRequires(c, NotArm)
	pushDigest, err := setupImage(c)

@@ -30,6 +30,10 @@ func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) {
	testPushBusyboxImage(c)
}

func (s *DockerSchema1RegistrySuite) TestPushBusyboxImage(c *check.C) {
	testPushBusyboxImage(c)
}

// pushing an image without a prefix should throw an error
func (s *DockerSuite) TestPushUnprefixedRepo(c *check.C) {
	out, _, err := dockerCmdWithError("push", "busybox")

@@ -49,6 +53,10 @@ func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) {
	testPushUntagged(c)
}

func (s *DockerSchema1RegistrySuite) TestPushUntagged(c *check.C) {
	testPushUntagged(c)
}

func testPushBadTag(c *check.C) {
	repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL)
	expected := "does not exist"

@@ -62,6 +70,10 @@ func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) {
	testPushBadTag(c)
}

func (s *DockerSchema1RegistrySuite) TestPushBadTag(c *check.C) {
	testPushBadTag(c)
}

func testPushMultipleTags(c *check.C) {
	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
	repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", privateRegistryURL)

@@ -103,6 +115,10 @@ func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) {
	testPushMultipleTags(c)
}

func (s *DockerSchema1RegistrySuite) TestPushMultipleTags(c *check.C) {
	testPushMultipleTags(c)
}

func testPushEmptyLayer(c *check.C) {
	repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL)
	emptyTarball, err := ioutil.TempFile("", "empty_tarball")

@@ -130,6 +146,10 @@ func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) {
	testPushEmptyLayer(c)
}

func (s *DockerSchema1RegistrySuite) TestPushEmptyLayer(c *check.C) {
	testPushEmptyLayer(c)
}

// testConcurrentPush pushes multiple tags to the same repo
// concurrently.
func testConcurrentPush(c *check.C) {

@@ -180,6 +200,10 @@ func (s *DockerRegistrySuite) TestConcurrentPush(c *check.C) {
	testConcurrentPush(c)
}

func (s *DockerSchema1RegistrySuite) TestConcurrentPush(c *check.C) {
	testConcurrentPush(c)
}

func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) {
	sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
	// tag the image to upload it to the private registry

@@ -222,6 +246,39 @@ func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) {
	assert.Equal(c, out4, "hello world")
}

func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c *check.C) {
	sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
	// tag the image to upload it to the private registry
	dockerCmd(c, "tag", "busybox", sourceRepoName)
	// push the image to the registry
	out1, _, err := dockerCmdWithError("push", sourceRepoName)
	assert.NilError(c, err, fmt.Sprintf("pushing the image to the private registry has failed: %s", out1))
	// ensure that none of the layers were mounted from another repository during push
	assert.Assert(c, !strings.Contains(out1, "Mounted from"))

	digest1 := reference.DigestRegexp.FindString(out1)
	assert.Assert(c, len(digest1) > 0, "no digest found for pushed manifest")

	destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL)
	// retag the image to upload the same layers to another repo in the same registry
	dockerCmd(c, "tag", "busybox", destRepoName)
	// push the image to the registry
	out2, _, err := dockerCmdWithError("push", destRepoName)
	assert.NilError(c, err, fmt.Sprintf("pushing the image to the private registry has failed: %s", out2))
	// a schema1 registry does not support cross-repository layer mounts, so ensure that none happened
	assert.Assert(c, !strings.Contains(out2, "Mounted from"))

	digest2 := reference.DigestRegexp.FindString(out2)
	assert.Assert(c, len(digest2) > 0, "no digest found for pushed manifest")
	assert.Assert(c, digest1 != digest2)

	// ensure that we can pull and run the second pushed repository
	dockerCmd(c, "rmi", destRepoName)
	dockerCmd(c, "pull", destRepoName)
	out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world")
	assert.Assert(c, out3 == "hello world")
}
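The "Mounted from" lines this test checks for come from cross-repository blob mounting: rather than re-uploading a layer that already exists in the registry, the client asks the registry to link the blob into the target repository with a single POST to the v2 upload endpoint. The older schema1-only registry predates that feature, which is why the test asserts the message never appears. A hedged illustration of the request shape (registry address, repositories, and digest are placeholders):

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
)

func main() {
	// POST /v2/<target>/blobs/uploads/?mount=<digest>&from=<source>
	// A 201 Created response means the registry linked the existing blob
	// and no layer data had to be uploaded.
	q := url.Values{}
	q.Set("mount", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	q.Set("from", "dockercli/busybox")

	u := "http://127.0.0.1:5000/v2/dockercli/crossrepopush/blobs/uploads/?" + q.Encode()
	resp, err := http.Post(u, "", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "201 Created" on a successful mount
}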

func (s *DockerRegistryAuthHtpasswdSuite) TestPushNoCredentialsNoRetry(c *check.C) {
	repoName := fmt.Sprintf("%s/busybox", privateRegistryURL)
	dockerCmd(c, "tag", "busybox", repoName)

@@ -4,6 +4,8 @@ import (
	"bytes"
	"context"
	"encoding/json"
	"io/ioutil"
	"path/filepath"
	"sort"
	"testing"
	"time"

@@ -13,6 +15,7 @@ import (
	swarmtypes "github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"github.com/docker/docker/integration/internal/swarm"
	"github.com/docker/docker/internal/test/daemon"
	"github.com/docker/docker/pkg/stdcopy"
	"gotest.tools/assert"
	is "gotest.tools/assert/cmp"

@@ -417,6 +420,26 @@ func TestConfigCreateResolve(t *testing.T) {
	assert.Assert(t, is.Equal(0, len(entries)))
}

func TestConfigDaemonLibtrustID(t *testing.T) {
	skip.If(t, testEnv.DaemonInfo.OSType != "linux")
	defer setupTest(t)()

	d := daemon.New(t)
	defer d.Stop(t)

	trustKey := filepath.Join(d.RootDir(), "key.json")
	err := ioutil.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644)
	assert.NilError(t, err)

	config := filepath.Join(d.RootDir(), "daemon.json")
	err = ioutil.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644)
	assert.NilError(t, err)

	d.Start(t, "--config-file", config)
	info := d.Info(t)
	assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB")
}
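The deprecated-key-path option exercised here is what lets an operator keep a pre-existing engine ID across this revert: when it points at a libtrust key, info.ID reports that key's fingerprint rather than a generated UUID (note the UUID-format test removed below). A daemon.json along these lines (the path is illustrative):

{
    "deprecated-key-path": "/etc/docker/key.json"
}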

func configNamesFromList(entries []swarmtypes.Config) []string {
	var values []string
	for _, entry := range entries {

@@ -1,23 +0,0 @@
package system

import (
	"context"
	"testing"

	"github.com/docker/docker/api/types/versions"
	"github.com/google/uuid"
	"gotest.tools/assert"
	"gotest.tools/skip"
)

func TestUUIDGeneration(t *testing.T) {
	skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "ID format changed")
	defer setupTest(t)()

	c := testEnv.APIClient()
	info, err := c.Info(context.Background())
	assert.NilError(t, err)

	_, err = uuid.Parse(info.ID)
	assert.NilError(t, err, info.ID)
}

@@ -1,40 +0,0 @@
package registry // import "github.com/docker/docker/registry"

import "net/url"

func (s *DefaultService) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) {
	if hostname == DefaultNamespace || hostname == DefaultV2Registry.Host || hostname == IndexHostname {
		return []APIEndpoint{}, nil
	}

	tlsConfig, err := s.tlsConfig(hostname)
	if err != nil {
		return nil, err
	}

	endpoints = []APIEndpoint{
		{
			URL: &url.URL{
				Scheme: "https",
				Host:   hostname,
			},
			Version:      APIVersion1,
			TrimHostname: true,
			TLSConfig:    tlsConfig,
		},
	}

	if tlsConfig.InsecureSkipVerify {
		endpoints = append(endpoints, APIEndpoint{ // or this
			URL: &url.URL{
				Scheme: "http",
				Host:   hostname,
			},
			Version:      APIVersion1,
			TrimHostname: true,
			// used to check if supposed to be secure via InsecureSkipVerify
			TLSConfig: tlsConfig,
		})
	}
	return endpoints, nil
}

@@ -1,32 +0,0 @@
package registry // import "github.com/docker/docker/registry"

import (
	"os"
	"testing"

	"gotest.tools/skip"
)

func TestLookupV1Endpoints(t *testing.T) {
	skip.If(t, os.Getuid() != 0, "skipping test that requires root")
	s, err := NewService(ServiceOptions{})
	if err != nil {
		t.Fatal(err)
	}

	cases := []struct {
		hostname    string
		expectedLen int
	}{
		{"example.com", 1},
		{DefaultNamespace, 0},
		{DefaultV2Registry.Host, 0},
		{IndexHostname, 0},
	}

	for _, c := range cases {
		if ret, err := s.lookupV1Endpoints(c.hostname); err != nil || len(ret) != c.expectedLen {
			t.Errorf("lookupV1Endpoints(`"+c.hostname+"`) returned %+v and %+v", ret, err)
		}
	}
}