From 500e77bad0b19b3b1c8e6ac195485adcb70daef1 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 18 Nov 2015 14:15:00 -0800 Subject: [PATCH 1/7] Add layer store Layer store manages read-only and read-write layers on a union file system. Read only layers are always referenced by content addresses. Read-write layer identifiers are handled by the caller but upon registering its difference, the committed read-only layer will be referenced by content hash. Signed-off-by: Derek McGowan (github: dmcgowan) Signed-off-by: Tonis Tiigi --- layer/empty.go | 47 +++ layer/empty_test.go | 46 +++ layer/filestore.go | 318 ++++++++++++++++++ layer/filestore_test.go | 119 +++++++ layer/layer.go | 238 +++++++++++++ layer/layer_store.go | 649 +++++++++++++++++++++++++++++++++++ layer/layer_test.go | 725 ++++++++++++++++++++++++++++++++++++++++ layer/layer_windows.go | 109 ++++++ layer/migration.go | 251 ++++++++++++++ layer/migration_test.go | 385 +++++++++++++++++++++ layer/mount_test.go | 217 ++++++++++++ layer/mounted_layer.go | 64 ++++ layer/ro_layer.go | 110 ++++++ 13 files changed, 3278 insertions(+) create mode 100644 layer/empty.go create mode 100644 layer/empty_test.go create mode 100644 layer/filestore.go create mode 100644 layer/filestore_test.go create mode 100644 layer/layer.go create mode 100644 layer/layer_store.go create mode 100644 layer/layer_test.go create mode 100644 layer/layer_windows.go create mode 100644 layer/migration.go create mode 100644 layer/migration_test.go create mode 100644 layer/mount_test.go create mode 100644 layer/mounted_layer.go create mode 100644 layer/ro_layer.go diff --git a/layer/empty.go b/layer/empty.go new file mode 100644 index 0000000000..04d2a20b88 --- /dev/null +++ b/layer/empty.go @@ -0,0 +1,47 @@ +package layer + +import ( + "archive/tar" + "bytes" + "io" +) + +// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - +// (1024 NULL bytes) +const DigestSHA256EmptyTar = 
DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") + +type emptyLayer struct{} + +// EmptyLayer is a layer that corresponds to empty tar. +var EmptyLayer = &emptyLayer{} + +func (el *emptyLayer) TarStream() (io.Reader, error) { + buf := new(bytes.Buffer) + tarWriter := tar.NewWriter(buf) + tarWriter.Close() + return buf, nil +} + +func (el *emptyLayer) ChainID() ChainID { + return ChainID(DigestSHA256EmptyTar) +} + +func (el *emptyLayer) DiffID() DiffID { + return DigestSHA256EmptyTar +} + +func (el *emptyLayer) Parent() Layer { + return nil +} + +func (el *emptyLayer) Size() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} diff --git a/layer/empty_test.go b/layer/empty_test.go new file mode 100644 index 0000000000..c22da7665d --- /dev/null +++ b/layer/empty_test.go @@ -0,0 +1,46 @@ +package layer + +import ( + "io" + "testing" + + "github.com/docker/distribution/digest" +) + +func TestEmptyLayer(t *testing.T) { + if EmptyLayer.ChainID() != ChainID(DigestSHA256EmptyTar) { + t.Fatal("wrong ID for empty layer") + } + + if EmptyLayer.DiffID() != DigestSHA256EmptyTar { + t.Fatal("wrong DiffID for empty layer") + } + + if EmptyLayer.Parent() != nil { + t.Fatal("expected no parent for empty layer") + } + + if size, err := EmptyLayer.Size(); err != nil || size != 0 { + t.Fatal("expected zero size for empty layer") + } + + if diffSize, err := EmptyLayer.DiffSize(); err != nil || diffSize != 0 { + t.Fatal("expected zero diffsize for empty layer") + } + + tarStream, err := EmptyLayer.TarStream() + if err != nil { + t.Fatalf("error streaming tar for empty layer: %v", err) + } + + digester := digest.Canonical.New() + _, err = io.Copy(digester.Hash(), tarStream) + + if err != nil { + t.Fatalf("error hashing empty tar layer: %v", err) + } + + if digester.Digest() != 
digest.Digest(DigestSHA256EmptyTar) { + t.Fatal("empty layer tar stream hashes to wrong value") + } +} diff --git a/layer/filestore.go b/layer/filestore.go new file mode 100644 index 0000000000..02ed32653f --- /dev/null +++ b/layer/filestore.go @@ -0,0 +1,318 @@ +package layer + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) + supportedAlgorithms = []digest.Algorithm{ + digest.SHA256, + // digest.SHA384, // Currently not used + // digest.SHA512, // Currently not used + } +) + +type fileMetadataStore struct { + root string +} + +type fileMetadataTransaction struct { + store *fileMetadataStore + root string +} + +// NewFSMetadataStore returns an instance of a metadata store +// which is backed by files on disk using the provided root +// as the root of metadata files. 
+func NewFSMetadataStore(root string) (MetadataStore, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + return &fileMetadataStore{ + root: root, + }, nil +} + +func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { + dgst := digest.Digest(layer) + return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) +} + +func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { + return filepath.Join(fms.getLayerDirectory(layer), filename) +} + +func (fms *fileMetadataStore) getMountDirectory(mount string) string { + return filepath.Join(fms.root, "mounts", mount) +} + +func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { + return filepath.Join(fms.getMountDirectory(mount), filename) +} + +func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { + tmpDir := filepath.Join(fms.root, "tmp") + if err := os.MkdirAll(tmpDir, 0755); err != nil { + return nil, err + } + + td, err := ioutil.TempDir(tmpDir, "layer-") + if err != nil { + return nil, err + } + // Create a new tempdir + return &fileMetadataTransaction{ + store: fms, + root: td, + }, nil +} + +func (fm *fileMetadataTransaction) SetSize(size int64) error { + content := fmt.Sprintf("%d", size) + return ioutil.WriteFile(filepath.Join(fm.root, "size"), []byte(content), 0644) +} + +func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { + return ioutil.WriteFile(filepath.Join(fm.root, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { + return ioutil.WriteFile(filepath.Join(fm.root, "diff"), []byte(digest.Digest(diff).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { + return ioutil.WriteFile(filepath.Join(fm.root, "cache-id"), []byte(cacheID), 0644) +} + +func (fm *fileMetadataTransaction) TarSplitWriter() (io.WriteCloser, error) { + f, err := 
os.OpenFile(filepath.Join(fm.root, "tar-split.json.gz"), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + + fz := gzip.NewWriter(f) + + return ioutils.NewWriteCloserWrapper(fz, func() error { + fz.Close() + return f.Close() + }), nil +} + +func (fm *fileMetadataTransaction) Commit(layer ChainID) error { + finalDir := fm.store.getLayerDirectory(layer) + if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { + return err + } + return os.Rename(fm.root, finalDir) +} + +func (fm *fileMetadataTransaction) Cancel() error { + return os.RemoveAll(fm.root) +} + +func (fm *fileMetadataTransaction) String() string { + return fm.root +} + +func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) + if err != nil { + return 0, err + } + + size, err := strconv.ParseInt(string(content), 10, 64) + if err != nil { + return 0, err + } + + return size, nil +} + +func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.ParseDigest(string(content)) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) + if err != nil { + return "", err + } + + dgst, err := digest.ParseDigest(string(content)) + if err != nil { + return "", err + } + + return DiffID(dgst), nil +} + +func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) + if err != nil { + return "", err + } + + if !stringIDRegexp.MatchString(string(content)) { + return "", errors.New("invalid cache id value") + } + + return string(content), nil +} + +func (fms 
*fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { + fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) + if err != nil { + return nil, err + } + f, err := gzip.NewReader(fz) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(f, func() error { + f.Close() + return fz.Close() + }), nil +} + +func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) +} + +func (fms *fileMetadataStore) SetInitID(mount string, init string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) +} + +func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { + content, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) + if err != nil { + return "", err + } + + if !stringIDRegexp.MatchString(string(content)) { + return "", errors.New("invalid mount id value") + } + + return string(content), nil +} + +func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { + content, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + if !stringIDRegexp.MatchString(string(content)) { + return "", errors.New("invalid init id value") + } + + return string(content), nil +} + +func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { + content, err := 
ioutil.ReadFile(fms.getMountFilename(mount, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.ParseDigest(string(content)) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { + var ids []ChainID + for _, algorithm := range supportedAlgorithms { + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, nil, err + } + + for _, fi := range fileInfos { + if fi.IsDir() && fi.Name() != "mounts" { + dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) + } else { + ids = append(ids, ChainID(dgst)) + } + } + } + } + + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) + if err != nil { + if os.IsNotExist(err) { + return ids, []string{}, nil + } + return nil, nil, err + } + + var mounts []string + for _, fi := range fileInfos { + if fi.IsDir() { + mounts = append(mounts, fi.Name()) + } + } + + return ids, mounts, nil +} + +func (fms *fileMetadataStore) Remove(layer ChainID) error { + return os.RemoveAll(fms.getLayerDirectory(layer)) +} + +func (fms *fileMetadataStore) RemoveMount(mount string) error { + return os.RemoveAll(fms.getMountDirectory(mount)) +} diff --git a/layer/filestore_test.go b/layer/filestore_test.go new file mode 100644 index 0000000000..4dae3f8300 --- /dev/null +++ b/layer/filestore_test.go @@ -0,0 +1,119 @@ +package layer + +import ( + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strings" + "syscall" + "testing" + + "github.com/docker/distribution/digest" +) + +func randomLayerID(seed int64) ChainID { + r := rand.New(rand.NewSource(seed)) + dgst, err := digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63()))) + if err != nil { + panic(err) + } + + 
return ChainID(dgst) +} + +func newFileMetadataStore(t *testing.T) (*fileMetadataStore, string, func()) { + td, err := ioutil.TempDir("", "layers-") + if err != nil { + t.Fatal(err) + } + fms, err := NewFSMetadataStore(td) + if err != nil { + t.Fatal(err) + } + + return fms.(*fileMetadataStore), td, func() { + if err := os.RemoveAll(td); err != nil { + t.Logf("Failed to cleanup %q: %s", td, err) + } + } +} + +func assertNotDirectoryError(t *testing.T, err error) { + perr, ok := err.(*os.PathError) + if !ok { + t.Fatalf("Unexpected error %#v, expected path error", err) + } + + if perr.Err != syscall.ENOTDIR { + t.Fatalf("Unexpected error %s, expected %s", perr.Err, syscall.ENOTDIR) + } +} + +func assertPermissionError(t *testing.T, err error) { + perr, ok := err.(*os.PathError) + if !ok { + t.Fatalf("Unexpected error %#v, expected path error", err) + } + + if perr.Err != syscall.EACCES { + t.Fatalf("Unexpected error %s, expected %s", perr.Err, syscall.EACCES) + } +} + +func TestCommitFailure(t *testing.T) { + fms, td, cleanup := newFileMetadataStore(t) + defer cleanup() + + if err := ioutil.WriteFile(filepath.Join(td, "sha256"), []byte("was here first!"), 0644); err != nil { + t.Fatal(err) + } + + tx, err := fms.StartTransaction() + if err != nil { + t.Fatal(err) + } + + if err := tx.SetSize(0); err != nil { + t.Fatal(err) + } + + err = tx.Commit(randomLayerID(5)) + if err == nil { + t.Fatalf("Expected error committing with invalid layer parent directory") + } + assertNotDirectoryError(t, err) +} + +func TestStartTransactionFailure(t *testing.T) { + fms, td, cleanup := newFileMetadataStore(t) + defer cleanup() + + if err := ioutil.WriteFile(filepath.Join(td, "tmp"), []byte("was here first!"), 0644); err != nil { + t.Fatal(err) + } + + _, err := fms.StartTransaction() + if err == nil { + t.Fatalf("Expected error starting transaction with invalid layer parent directory") + } + assertNotDirectoryError(t, err) + + if err := os.Remove(filepath.Join(td, "tmp")); err != 
nil { + t.Fatal(err) + } + + tx, err := fms.StartTransaction() + if err != nil { + t.Fatal(err) + } + + if expected := filepath.Join(td, "tmp"); strings.HasPrefix(expected, tx.String()) { + t.Fatalf("Unexpected transaction string %q, expected prefix %q", tx.String(), expected) + } + + if err := tx.Cancel(); err != nil { + t.Fatal(err) + } +} diff --git a/layer/layer.go b/layer/layer.go new file mode 100644 index 0000000000..96bf5e28c1 --- /dev/null +++ b/layer/layer.go @@ -0,0 +1,238 @@ +// Package layer is package for managing read only +// and read-write mounts on the union file system +// driver. Read-only mounts are refenced using a +// content hash and are protected from mutation in +// the exposed interface. The tar format is used +// to create read only layers and export both +// read only and writable layers. The exported +// tar data for a read only layer should match +// the tar used to create the layer. +package layer + +import ( + "errors" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/archive" +) + +var ( + // ErrLayerDoesNotExist is used when an operation is + // attempted on a layer which does not exist. + ErrLayerDoesNotExist = errors.New("layer does not exist") + + // ErrLayerNotRetained is used when a release is + // attempted on a layer which is not retained. + ErrLayerNotRetained = errors.New("layer not retained") + + // ErrMountDoesNotExist is used when an operation is + // attempted on a mount layer which does not exist. + ErrMountDoesNotExist = errors.New("mount does not exist") + + // ErrActiveMount is used when an operation on a + // mount is attempted but the layer is still + // mounted and the operation cannot be performed. + ErrActiveMount = errors.New("mount still active") + + // ErrNotMounted is used when requesting an active + // mount but the layer is not mounted. 
+ ErrNotMounted = errors.New("not mounted") + + // ErrMaxDepthExceeded is used when a layer is attempted + // to be created which would result in a layer depth + // greater than the 125 max. + ErrMaxDepthExceeded = errors.New("max depth exceeded") +) + +// ChainID is the content-addressable ID of a layer. +type ChainID digest.Digest + +// String returns a string rendition of a layer ID +func (id ChainID) String() string { + return string(id) +} + +// DiffID is the hash of an individual layer tar. +type DiffID digest.Digest + +// String returns a string rendition of a layer DiffID +func (diffID DiffID) String() string { + return string(diffID) +} + +// TarStreamer represents an object which may +// have its contents exported as a tar stream. +type TarStreamer interface { + // TarStream returns a tar archive stream + // for the contents of a layer. + TarStream() (io.Reader, error) +} + +// Layer represents a read only layer +type Layer interface { + TarStreamer + + // ChainID returns the content hash of the entire layer chain. The hash + // chain is made up of DiffID of top layer and all of its parents. + ChainID() ChainID + + // DiffID returns the content hash of the layer + // tar stream used to create this layer. + DiffID() DiffID + + // Parent returns the next layer in the layer chain. + Parent() Layer + + // Size returns the size of the entire layer chain. The size + // is calculated from the total size of all files in the layers. + Size() (int64, error) + + // DiffSize returns the size difference of the top layer + // from parent layer. + DiffSize() (int64, error) + + // Metadata returns the low level storage metadata associated + // with layer. + Metadata() (map[string]string, error) +} + +// RWLayer represents a layer which is +// read and writable +type RWLayer interface { + TarStreamer + + // Path returns the filesystem path to the writable + // layer. + Path() (string, error) + + // Parent returns the layer which the writable + // layer was created from. 
+ Parent() Layer + + // Size represents the size of the writable layer + // as calculated by the total size of the files + // changed in the mutable layer. + Size() (int64, error) +} + +// Metadata holds information about a +// read only layer +type Metadata struct { + // ChainID is the content hash of the layer + ChainID ChainID + + // DiffID is the hash of the tar data used to + // create the layer + DiffID DiffID + + // Size is the size of the layer and all parents + Size int64 + + // DiffSize is the size of the top layer + DiffSize int64 +} + +// MountInit is a function to initialize a +// writable mount. Changes made here will +// not be included in the Tar stream of the +// RWLayer. +type MountInit func(root string) error + +// Store represents a backend for managing both +// read-only and read-write layers. +type Store interface { + Register(io.Reader, ChainID) (Layer, error) + Get(ChainID) (Layer, error) + Release(Layer) ([]Metadata, error) + + Mount(id string, parent ChainID, label string, init MountInit) (RWLayer, error) + Unmount(id string) error + DeleteMount(id string) ([]Metadata, error) + Changes(id string) ([]archive.Change, error) +} + +// MetadataTransaction represents functions for setting layer metadata +// with a single transaction. +type MetadataTransaction interface { + SetSize(int64) error + SetParent(parent ChainID) error + SetDiffID(DiffID) error + SetCacheID(string) error + TarSplitWriter() (io.WriteCloser, error) + + Commit(ChainID) error + Cancel() error + String() string +} + +// MetadataStore represents a backend for persisting +// metadata about layers and providing the metadata +// for restoring a Store. +type MetadataStore interface { + // StartTransaction starts an update for new metadata + // which will be used to represent an ID on commit. 
+ StartTransaction() (MetadataTransaction, error) + + GetSize(ChainID) (int64, error) + GetParent(ChainID) (ChainID, error) + GetDiffID(ChainID) (DiffID, error) + GetCacheID(ChainID) (string, error) + TarSplitReader(ChainID) (io.ReadCloser, error) + + SetMountID(string, string) error + SetInitID(string, string) error + SetMountParent(string, ChainID) error + + GetMountID(string) (string, error) + GetInitID(string) (string, error) + GetMountParent(string) (ChainID, error) + + // List returns the full list of referened + // read-only and read-write layers + List() ([]ChainID, []string, error) + + Remove(ChainID) error + RemoveMount(string) error +} + +// CreateChainID returns ID for a layerDigest slice +func CreateChainID(dgsts []DiffID) ChainID { + return createChainIDFromParent("", dgsts...) +} + +func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) + } + // H = "H(n-1) SHA256(n)" + dgst, err := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + if err != nil { + // Digest calculation is not expected to throw an error, + // any error at this point is a program error + panic(err) + } + return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) 
+} + +// ReleaseAndLog releases the provided layer from the given layer +// store, logging any error and release metadata +func ReleaseAndLog(ls Store, l Layer) { + metadata, err := ls.Release(l) + if err != nil { + logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) + } + LogReleaseMetadata(metadata) +} + +// LogReleaseMetadata logs a metadata array, use this to +// ensure consistent logging for release metadata +func LogReleaseMetadata(metadatas []Metadata) { + for _, metadata := range metadatas { + logrus.Infof("Layer %s cleaned up", metadata.ChainID) + } +} diff --git a/layer/layer_store.go b/layer/layer_store.go new file mode 100644 index 0000000000..55ec2262f5 --- /dev/null +++ b/layer/layer_store.go @@ -0,0 +1,649 @@ +package layer + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "runtime" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// maxLayerDepth represents the maximum number of +// layers which can be chained together. 125 was +// chosen to account for the 127 max in some +// graphdrivers plus the 2 additional layers +// used to create a rwlayer. +const maxLayerDepth = 125 + +type layerStore struct { + store MetadataStore + driver graphdriver.Driver + + layerMap map[ChainID]*roLayer + layerL sync.Mutex + + mounts map[string]*mountedLayer + mountL sync.Mutex +} + +// NewStore creates a new Store instance using +// the provided metadata store and graph driver. +// The metadata store will be used to restore +// the Store. 
+func NewStore(store MetadataStore, driver graphdriver.Driver) (Store, error) {
+	ls := &layerStore{
+		store:    store,
+		driver:   driver,
+		layerMap: map[ChainID]*roLayer{},
+		mounts:   map[string]*mountedLayer{},
+	}
+
+	ids, mounts, err := store.List()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, id := range ids {
+		l, err := ls.loadLayer(id)
+		if err != nil {
+			logrus.Debugf("Failed to load layer %s: %s", id, err)
+			// loadLayer returns a nil layer on error; skip it here,
+			// otherwise the l.parent dereference below panics.
+			continue
+		}
+		if l.parent != nil {
+			l.parent.referenceCount++
+		}
+	}
+
+	for _, mount := range mounts {
+		if err := ls.loadMount(mount); err != nil {
+			logrus.Debugf("Failed to load mount %s: %s", mount, err)
+		}
+	}
+
+	return ls, nil
+}
+
+// loadLayer restores a single read-only layer (and, recursively, its
+// parent chain) from the metadata store into the in-memory layerMap.
+// Already-loaded layers are returned from the cache.
+func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
+	cl, ok := ls.layerMap[layer]
+	if ok {
+		return cl, nil
+	}
+
+	diff, err := ls.store.GetDiffID(layer)
+	if err != nil {
+		return nil, err
+	}
+
+	size, err := ls.store.GetSize(layer)
+	if err != nil {
+		return nil, err
+	}
+
+	cacheID, err := ls.store.GetCacheID(layer)
+	if err != nil {
+		return nil, err
+	}
+
+	parent, err := ls.store.GetParent(layer)
+	if err != nil {
+		return nil, err
+	}
+
+	cl = &roLayer{
+		chainID:    layer,
+		diffID:     diff,
+		size:       size,
+		cacheID:    cacheID,
+		layerStore: ls,
+		references: map[Layer]struct{}{},
+	}
+
+	if parent != "" {
+		p, err := ls.loadLayer(parent)
+		if err != nil {
+			return nil, err
+		}
+		cl.parent = p
+	}
+
+	ls.layerMap[cl.chainID] = cl
+
+	return cl, nil
+}
+
+// loadMount restores a read-write mount from the metadata store,
+// taking a reference on its parent read-only layer if one exists.
+func (ls *layerStore) loadMount(mount string) error {
+	if _, ok := ls.mounts[mount]; ok {
+		return nil
+	}
+
+	mountID, err := ls.store.GetMountID(mount)
+	if err != nil {
+		return err
+	}
+
+	initID, err := ls.store.GetInitID(mount)
+	if err != nil {
+		return err
+	}
+
+	parent, err := ls.store.GetMountParent(mount)
+	if err != nil {
+		return err
+	}
+
+	ml := &mountedLayer{
+		name:       mount,
+		mountID:    mountID,
+		initID:     initID,
+		layerStore: ls,
+	}
+
+	if parent != "" {
+		p, err := ls.loadLayer(parent)
+		if err != nil {
+			return err
+		}
+		ml.parent = p
+
+		p.referenceCount++
+	}
+
+	ls.mounts[ml.name] = ml
+
+	return nil
+}
+
+// applyTar extracts the tar stream onto the graphdriver layer identified
+// by layer.cacheID, records the tar-split metadata through tx, and fills
+// in the layer's size and DiffID from the bytes actually read.
+func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error {
+	digester := digest.Canonical.New()
+	tr := io.TeeReader(ts, digester.Hash())
+
+	tsw, err := tx.TarSplitWriter()
+	if err != nil {
+		return err
+	}
+	metaPacker := storage.NewJSONPacker(tsw)
+	defer tsw.Close()
+
+	// we're passing nil here for the file putter, because the ApplyDiff will
+	// handle the extraction of the archive
+	rdr, err := asm.NewInputTarStream(tr, metaPacker, nil)
+	if err != nil {
+		return err
+	}
+
+	applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, archive.Reader(rdr))
+	if err != nil {
+		return err
+	}
+
+	// Discard trailing data but ensure metadata is picked up to reconstruct stream
+	io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed
+
+	layer.size = applySize
+	layer.diffID = DiffID(digester.Digest())
+
+	logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize)
+
+	return nil
+}
+
+func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) {
+	// err is used to hold the error which will always trigger
+	// cleanup of creates sources but may not be an error returned
+	// to the caller (already exists).
+ var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + if p.depth() >= maxLayerDepth { + err = ErrMaxDepthExceeded + return nil, err + } + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: stringid.GenerateRandomID(), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + } + + if err = ls.driver.Create(layer.cacheID, pid, ""); err != nil { + return nil, err + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) + if err := ls.driver.Remove(layer.cacheID); err != nil { + logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) + } + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + if err = ls.applyTar(tx, ts, pid, layer); err != nil { + return nil, err + } + + if layer.parent == nil { + layer.chainID = ChainID(layer.diffID) + } else { + layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getAndRetainLayer(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return the error + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) get(l ChainID) *roLayer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layer, ok := ls.layerMap[l] 
+ if !ok { + return nil + } + + layer.referenceCount++ + + return layer +} + +func (ls *layerStore) Get(l ChainID) (Layer, error) { + layer := ls.get(l) + if layer == nil { + return nil, ErrLayerDoesNotExist + } + + return layer.getReference(), nil +} + +func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { + err := ls.driver.Remove(layer.cacheID) + if err != nil { + return err + } + + err = ls.store.Remove(layer.chainID) + if err != nil { + return err + } + metadata.DiffID = layer.diffID + metadata.ChainID = layer.chainID + metadata.Size, err = layer.Size() + if err != nil { + return err + } + metadata.DiffSize = layer.size + + return nil +} + +func (ls *layerStore) releaseLayers(l *roLayer, removed *[]Metadata, depth int) error { + if l.referenceCount == 0 { + panic("layer not retained") + } + l.referenceCount-- + if l.referenceCount != 0 { + return nil + } + + if len(*removed) == 0 && depth > 0 { + panic("cannot remove layer with child") + } + if l.hasReferences() { + panic("cannot delete referenced layer") + } + var metadata Metadata + if err := ls.deleteLayer(l, &metadata); err != nil { + return err + } + + delete(ls.layerMap, l.chainID) + *removed = append(*removed, metadata) + + if l.parent != nil { + if err := ls.releaseLayers(l.parent, removed, depth+1); err != nil { + return err + } + } + + return nil +} + +func (ls *layerStore) releaseLayer(layer *roLayer) ([]Metadata, error) { + removed := []Metadata{} + err := ls.releaseLayers(layer, &removed, 0) + return removed, err +} + +func (ls *layerStore) Release(l Layer) ([]Metadata, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + layer, ok := ls.layerMap[l.ChainID()] + if !ok { + return []Metadata{}, nil + } + if !layer.hasReference(l) { + return nil, ErrLayerNotRetained + } + + layer.deleteReference(l) + + return ls.releaseLayer(layer) +} + +func (ls *layerStore) mount(m *mountedLayer, mountLabel string) error { + dir, err := ls.driver.Get(m.mountID, mountLabel) + if err != 
nil { + return err + } + m.path = dir + m.activityCount++ + + return nil +} + +func (ls *layerStore) saveMount(mount *mountedLayer) error { + if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { + return err + } + + if mount.initID != "" { + if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { + return err + } + } + + if mount.parent != nil { + if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { + return err + } + } + + ls.mounts[mount.name] = mount + + return nil +} + +func (ls *layerStore) getAndRetainLayer(layer ChainID) *roLayer { + l, ok := ls.layerMap[layer] + if !ok { + return nil + } + + l.referenceCount++ + + return l +} + +func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit) (string, error) { + // Use "-init" to maintain compatibility with graph drivers + // which are expecting this layer with this special name. If all + // graph drivers can be updated to not rely on knowin about this layer + // then the initID should be randomly generated. 
+ initID := fmt.Sprintf("%s-init", graphID) + + if err := ls.driver.Create(initID, parent, mountLabel); err != nil { + + } + p, err := ls.driver.Get(initID, "") + if err != nil { + return "", err + } + + if err := initFunc(p); err != nil { + ls.driver.Put(initID) + return "", err + } + + if err := ls.driver.Put(initID); err != nil { + return "", err + } + + return initID, nil +} + +func (ls *layerStore) Mount(name string, parent ChainID, mountLabel string, initFunc MountInit) (l RWLayer, err error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + // Check if has path + if err := ls.mount(m, mountLabel); err != nil { + return nil, err + } + return m, nil + } + + var pid string + var p *roLayer + if string(parent) != "" { + ls.layerL.Lock() + p = ls.getAndRetainLayer(parent) + ls.layerL.Unlock() + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + mountID := name + if runtime.GOOS != "windows" { + // windows has issues if container ID doesn't match mount ID + mountID = stringid.GenerateRandomID() + } + + m = &mountedLayer{ + name: name, + parent: p, + mountID: mountID, + layerStore: ls, + } + + if initFunc != nil { + pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc) + if err != nil { + return nil, err + } + m.initID = pid + } + + if err = ls.driver.Create(m.mountID, pid, ""); err != nil { + return nil, err + } + + if err = ls.saveMount(m); err != nil { + return nil, err + } + + if err = ls.mount(m, mountLabel); err != nil { + return nil, err + } + + return m, nil +} + +func (ls *layerStore) Unmount(name string) error { + ls.mountL.Lock() + defer ls.mountL.Unlock() + + m := ls.mounts[name] + if m == nil { + return ErrMountDoesNotExist + } + + m.activityCount-- + + if err := ls.driver.Put(m.mountID); err != nil { + return err + } + + return nil +} + 
+func (ls *layerStore) DeleteMount(name string) ([]Metadata, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + + m := ls.mounts[name] + if m == nil { + return nil, ErrMountDoesNotExist + } + if m.activityCount > 0 { + return nil, ErrActiveMount + } + + delete(ls.mounts, name) + + if err := ls.driver.Remove(m.mountID); err != nil { + logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) + return nil, err + } + + if m.initID != "" { + if err := ls.driver.Remove(m.initID); err != nil { + logrus.Errorf("Error removing init layer %s: %s", m.name, err) + return nil, err + } + } + + if err := ls.store.RemoveMount(m.name); err != nil { + logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + if m.parent != nil { + return ls.releaseLayer(m.parent) + } + + return []Metadata{}, nil +} + +func (ls *layerStore) Changes(name string) ([]archive.Change, error) { + ls.mountL.Lock() + m := ls.mounts[name] + ls.mountL.Unlock() + if m == nil { + return nil, ErrMountDoesNotExist + } + pid := m.initID + if pid == "" && m.parent != nil { + pid = m.parent.cacheID + } + return ls.driver.Changes(m.mountID, pid) +} + +func (ls *layerStore) assembleTar(graphID string, metadata io.ReadCloser, size *int64) (io.Reader, error) { + type diffPathDriver interface { + DiffPath(string) (string, func() error, error) + } + + diffDriver, ok := ls.driver.(diffPathDriver) + if !ok { + diffDriver = &naiveDiffPathDriver{ls.driver} + } + + // get our relative path to the container + fsPath, releasePath, err := diffDriver.DiffPath(graphID) + if err != nil { + metadata.Close() + return nil, err + } + + pR, pW := io.Pipe() + // this will need to be in a goroutine, as we are returning the stream of a + // tar archive, but can not close the metadata reader early (when this + // function returns)... 
+ go func() { + defer releasePath() + defer metadata.Close() + + metaUnpacker := storage.NewJSONUnpacker(metadata) + upackerCounter := &unpackSizeCounter{metaUnpacker, size} + fileGetter := storage.NewPathFileGetter(fsPath) + logrus.Debugf("Assembling tar data for %s from %s", graphID, fsPath) + ots := asm.NewOutputTarStream(fileGetter, upackerCounter) + defer ots.Close() + if _, err := io.Copy(pW, ots); err != nil { + pW.CloseWithError(err) + return + } + pW.Close() + }() + return pR, nil +} + +type naiveDiffPathDriver struct { + graphdriver.Driver +} + +func (n *naiveDiffPathDriver) DiffPath(id string) (string, func() error, error) { + p, err := n.Driver.Get(id, "") + if err != nil { + return "", nil, err + } + return p, func() error { + return n.Driver.Put(id) + }, nil +} diff --git a/layer/layer_test.go b/layer/layer_test.go new file mode 100644 index 0000000000..e08f594e01 --- /dev/null +++ b/layer/layer_test.go @@ -0,0 +1,725 @@ +package layer + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/vfs" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" +) + +func init() { + graphdriver.ApplyUncompressedLayer = archive.UnpackLayer + vfs.CopyWithTar = archive.CopyWithTar +} + +func newVFSGraphDriver(td string) (graphdriver.Driver, error) { + uidMap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: os.Getuid(), + Size: 1, + }, + } + gidMap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: os.Getgid(), + Size: 1, + }, + } + + return graphdriver.GetDriver("vfs", td, nil, uidMap, gidMap) +} + +func newTestGraphDriver(t *testing.T) (graphdriver.Driver, func()) { + td, err := ioutil.TempDir("", "graph-") + if err != nil { + t.Fatal(err) + } + + driver, err := newVFSGraphDriver(td) + if err != nil { + t.Fatal(err) + } + + return 
driver, func() { + os.RemoveAll(td) + } +} + +func newTestStore(t *testing.T) (Store, func()) { + td, err := ioutil.TempDir("", "layerstore-") + if err != nil { + t.Fatal(err) + } + + graph, graphcleanup := newTestGraphDriver(t) + fms, err := NewFSMetadataStore(td) + if err != nil { + t.Fatal(err) + } + ls, err := NewStore(fms, graph) + if err != nil { + t.Fatal(err) + } + + return ls, func() { + graphcleanup() + os.RemoveAll(td) + } +} + +type layerInit func(root string) error + +func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { + containerID := stringid.GenerateRandomID() + mount, err := ls.Mount(containerID, parent, "", nil) + if err != nil { + return nil, err + } + + path, err := mount.Path() + if err != nil { + return nil, err + } + + if err := layerFunc(path); err != nil { + return nil, err + } + + ts, err := mount.TarStream() + if err != nil { + return nil, err + } + + layer, err := ls.Register(ts, parent) + if err != nil { + return nil, err + } + + if err := ls.Unmount(containerID); err != nil { + return nil, err + } + + if _, err := ls.DeleteMount(containerID); err != nil { + return nil, err + } + + return layer, nil +} + +type FileApplier interface { + ApplyFile(root string) error +} + +type testFile struct { + name string + content []byte + permission os.FileMode +} + +func newTestFile(name string, content []byte, perm os.FileMode) FileApplier { + return &testFile{ + name: name, + content: content, + permission: perm, + } +} + +func (tf *testFile) ApplyFile(root string) error { + fullPath := filepath.Join(root, tf.name) + if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { + return err + } + // Check if already exists + if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { + if err := os.Chmod(fullPath, tf.permission); err != nil { + return err + } + } + if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil { + return err + } + return nil +} + +func 
initWithFiles(files ...FileApplier) layerInit { + return func(root string) error { + for _, f := range files { + if err := f.ApplyFile(root); err != nil { + return err + } + } + return nil + } +} + +func getCachedLayer(l Layer) *roLayer { + if rl, ok := l.(*referencedCacheLayer); ok { + return rl.roLayer + } + return l.(*roLayer) +} + +func createMetadata(layers ...Layer) []Metadata { + metadata := make([]Metadata, len(layers)) + for i := range layers { + size, err := layers[i].Size() + if err != nil { + panic(err) + } + + metadata[i].ChainID = layers[i].ChainID() + metadata[i].DiffID = layers[i].DiffID() + metadata[i].Size = size + metadata[i].DiffSize = getCachedLayer(layers[i]).size + } + + return metadata +} + +func assertMetadata(t *testing.T, metadata, expectedMetadata []Metadata) { + if len(metadata) != len(expectedMetadata) { + t.Fatalf("Unexpected number of deletes %d, expected %d", len(metadata), len(expectedMetadata)) + } + + for i := range metadata { + if metadata[i] != expectedMetadata[i] { + t.Errorf("Unexpected metadata\n\tExpected: %#v\n\tActual: %#v", expectedMetadata[i], metadata[i]) + } + } + if t.Failed() { + t.FailNow() + } +} + +func releaseAndCheckDeleted(t *testing.T, ls Store, layer Layer, removed ...Layer) { + layerCount := len(ls.(*layerStore).layerMap) + expectedMetadata := createMetadata(removed...) 
+ metadata, err := ls.Release(layer) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, expectedMetadata) + + if expected := layerCount - len(removed); len(ls.(*layerStore).layerMap) != expected { + t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) + } +} + +func cacheID(l Layer) string { + return getCachedLayer(l).cacheID +} + +func assertLayerEqual(t *testing.T, l1, l2 Layer) { + if l1.ChainID() != l2.ChainID() { + t.Fatalf("Mismatched ID: %s vs %s", l1.ChainID(), l2.ChainID()) + } + if l1.DiffID() != l2.DiffID() { + t.Fatalf("Mismatched DiffID: %s vs %s", l1.DiffID(), l2.DiffID()) + } + + size1, err := l1.Size() + if err != nil { + t.Fatal(err) + } + + size2, err := l2.Size() + if err != nil { + t.Fatal(err) + } + + if size1 != size2 { + t.Fatalf("Mismatched size: %d vs %d", size1, size2) + } + + if cacheID(l1) != cacheID(l2) { + t.Fatalf("Mismatched cache id: %s vs %s", cacheID(l1), cacheID(l2)) + } + + p1 := l1.Parent() + p2 := l2.Parent() + if p1 != nil && p2 != nil { + assertLayerEqual(t, p1, p2) + } else if p1 != nil || p2 != nil { + t.Fatalf("Mismatched parents: %v vs %v", p1, p2) + } +} + +func TestMountAndRegister(t *testing.T) { + ls, cleanup := newTestStore(t) + defer cleanup() + + li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644)) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + size, _ := layer.Size() + t.Logf("Layer size: %d", size) + + mount2, err := ls.Mount("new-test-mount", layer.ChainID(), "", nil) + if err != nil { + t.Fatal(err) + } + + path2, err := mount2.Path() + if err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + + if expected := "some test data"; string(b) != expected { + t.Fatalf("Wrong file data, expected %q, got %q", expected, string(b)) + } + + if err := ls.Unmount("new-test-mount"); err != nil { + t.Fatal(err) + } + + 
if _, err := ls.DeleteMount("new-test-mount"); err != nil { + t.Fatal(err) + } +} + +func TestLayerRelease(t *testing.T) { + ls, cleanup := newTestStore(t) + defer cleanup() + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + layer3a, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3a file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer3b, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3b file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + t.Logf("Layer1: %s", layer1.ChainID()) + t.Logf("Layer2: %s", layer2.ChainID()) + t.Logf("Layer3a: %s", layer3a.ChainID()) + t.Logf("Layer3b: %s", layer3b.ChainID()) + + if expected := 4; len(ls.(*layerStore).layerMap) != expected { + t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) + } + + releaseAndCheckDeleted(t, ls, layer3b, layer3b) + releaseAndCheckDeleted(t, ls, layer3a, layer3a, layer2, layer1) +} + +func TestStoreRestore(t *testing.T) { + ls, cleanup := newTestStore(t) + defer cleanup() + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + layer3, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3 file"), 
0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + m, err := ls.Mount("some-mount_name", layer3.ChainID(), "", nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Path() + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil { + t.Fatal(err) + } + + if err := ls.Unmount("some-mount_name"); err != nil { + t.Fatal(err) + } + + ls2, err := NewStore(ls.(*layerStore).store, ls.(*layerStore).driver) + if err != nil { + t.Fatal(err) + } + + layer3b, err := ls2.Get(layer3.ChainID()) + if err != nil { + t.Fatal(err) + } + + assertLayerEqual(t, layer3b, layer3) + + // Mount again with same name, should already be loaded + m2, err := ls2.Mount("some-mount_name", layer3b.ChainID(), "", nil) + if err != nil { + t.Fatal(err) + } + + path2, err := m2.Path() + if err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + if expected := "nothing here"; string(b) != expected { + t.Fatalf("Unexpected content %q, expected %q", string(b), expected) + } + + if err := ls2.Unmount("some-mount_name"); err != nil { + t.Fatal(err) + } + + if metadata, err := ls2.DeleteMount("some-mount_name"); err != nil { + t.Fatal(err) + } else if len(metadata) != 0 { + t.Fatalf("Unexpectedly deleted layers: %#v", metadata) + } + + releaseAndCheckDeleted(t, ls2, layer3b, layer3, layer2, layer1) +} + +func TestTarStreamStability(t *testing.T) { + ls, cleanup := newTestStore(t) + defer cleanup() + + files1 := []FileApplier{ + newTestFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0644), + newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0644), + } + addedFile := newTestFile("/etc/shadow", []byte("root:::::::"), 0644) + files2 := []FileApplier{ + newTestFile("/etc/hosts", []byte("mydomain 10.0.0.2"), 0644), + newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 
0664), + newTestFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0644), + } + + tar1, err := tarFromFiles(files1...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(files2...) + if err != nil { + t.Fatal(err) + } + + layer1, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + // hack layer to add file + p, err := ls.(*layerStore).driver.Get(layer1.(*referencedCacheLayer).cacheID, "") + if err != nil { + t.Fatal(err) + } + + if err := addedFile.ApplyFile(p); err != nil { + t.Fatal(err) + } + + if err := ls.(*layerStore).driver.Put(layer1.(*referencedCacheLayer).cacheID); err != nil { + t.Fatal(err) + } + + layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID()) + if err != nil { + t.Fatal(err) + } + + id1 := layer1.ChainID() + t.Logf("Layer 1: %s", layer1.ChainID()) + t.Logf("Layer 2: %s", layer2.ChainID()) + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + assertLayerDiff(t, tar2, layer2) + + layer1b, err := ls.Get(id1) + if err != nil { + t.Logf("Content of layer map: %#v", ls.(*layerStore).layerMap) + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + assertLayerDiff(t, tar1, layer1b) + + if _, err := ls.Release(layer1b); err != nil { + t.Fatal(err) + } +} + +func assertLayerDiff(t *testing.T, expected []byte, layer Layer) { + expectedDigest, err := digest.FromBytes(expected) + if err != nil { + t.Fatal(err) + } + + if digest.Digest(layer.DiffID()) != expectedDigest { + t.Fatalf("Mismatched diff id for %s, got %s, expected %s", layer.ChainID(), layer.DiffID(), expected) + } + + ts, err := layer.TarStream() + if err != nil { + t.Fatal(err) + } + + actual, err := ioutil.ReadAll(ts) + if err != nil { + t.Fatal(err) + } + + if len(actual) != len(expected) { + logByteDiff(t, actual, expected) + t.Fatalf("Mismatched tar stream size for %s, got %d, expected %d", layer.ChainID(), len(actual), len(expected)) + } + + actualDigest, err := 
digest.FromBytes(actual) + if err != nil { + t.Fatal(err) + } + + if actualDigest != expectedDigest { + logByteDiff(t, actual, expected) + t.Fatalf("Wrong digest of tar stream, got %s, expected %s", actualDigest, expectedDigest) + } +} + +const maxByteLog = 4 * 1024 + +func logByteDiff(t *testing.T, actual, expected []byte) { + d1, d2 := byteDiff(actual, expected) + if len(d1) == 0 && len(d2) == 0 { + return + } + + prefix := len(actual) - len(d1) + if len(d1) > maxByteLog || len(d2) > maxByteLog { + t.Logf("Byte diff after %d matching bytes", prefix) + } else { + t.Logf("Byte diff after %d matching bytes\nActual bytes after prefix:\n%x\nExpected bytes after prefix:\n%x", prefix, d1, d2) + } +} + +// byteDiff returns the differing bytes after the matching prefix +func byteDiff(b1, b2 []byte) ([]byte, []byte) { + i := 0 + for i < len(b1) && i < len(b2) { + if b1[i] != b2[i] { + break + } + i++ + } + + return b1[i:], b2[i:] +} + +func tarFromFiles(files ...FileApplier) ([]byte, error) { + td, err := ioutil.TempDir("", "tar-") + if err != nil { + return nil, err + } + defer os.RemoveAll(td) + + for _, f := range files { + if err := f.ApplyFile(td); err != nil { + return nil, err + } + } + + r, err := archive.Tar(td, archive.Uncompressed) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, r); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// assertReferences asserts that all the references are to the same +// image and represent the full set of references to that image. 
+func assertReferences(t *testing.T, references ...Layer) { + if len(references) == 0 { + return + } + base := references[0].(*referencedCacheLayer).roLayer + seenReferences := map[Layer]struct{}{ + references[0]: {}, + } + for i := 1; i < len(references); i++ { + other := references[i].(*referencedCacheLayer).roLayer + if base != other { + t.Fatalf("Unexpected referenced cache layer %s, expecting %s", other.ChainID(), base.ChainID()) + } + if _, ok := base.references[references[i]]; !ok { + t.Fatalf("Reference not part of reference list: %v", references[i]) + } + if _, ok := seenReferences[references[i]]; ok { + t.Fatalf("Duplicated reference %v", references[i]) + } + } + if rc := len(base.references); rc != len(references) { + t.Fatalf("Unexpected number of references %d, expecting %d", rc, len(references)) + } +} + +func TestRegisterExistingLayer(t *testing.T) { + ls, cleanup := newTestStore(t) + defer cleanup() + + baseFiles := []FileApplier{ + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layerFiles := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Root configuration"), 0644), + } + + li := initWithFiles(baseFiles...) + layer1, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + tar1, err := tarFromFiles(layerFiles...) 
+ if err != nil { + t.Fatal(err) + } + + layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer2a, layer2b) +} + +func graphDiffSize(ls Store, l Layer) (int64, error) { + cl := getCachedLayer(l) + var parent string + if cl.parent != nil { + parent = cl.parent.cacheID + } + return ls.(*layerStore).driver.DiffSize(cl.cacheID, parent) +} + +func TestLayerSize(t *testing.T) { + ls, cleanup := newTestStore(t) + defer cleanup() + + content1 := []byte("Base contents") + content2 := []byte("Added contents") + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("file1", content1, 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("file2", content2, 0644))) + if err != nil { + t.Fatal(err) + } + + layer1DiffSize, err := graphDiffSize(ls, layer1) + if err != nil { + t.Fatal(err) + } + + if int(layer1DiffSize) != len(content1) { + t.Fatalf("Unexpected diff size %d, expected %d", layer1DiffSize, len(content1)) + } + + layer1Size, err := layer1.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content1); int(layer1Size) != expected { + t.Fatalf("Unexpected size %d, expected %d", layer1Size, expected) + } + + layer2DiffSize, err := graphDiffSize(ls, layer2) + if err != nil { + t.Fatal(err) + } + + if int(layer2DiffSize) != len(content2) { + t.Fatalf("Unexpected diff size %d, expected %d", layer2DiffSize, len(content2)) + } + + layer2Size, err := layer2.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content1) + len(content2); int(layer2Size) != expected { + t.Fatalf("Unexpected size %d, expected %d", layer2Size, expected) + } +} diff --git a/layer/layer_windows.go b/layer/layer_windows.go new file mode 100644 index 0000000000..d2e91b7980 --- /dev/null +++ 
b/layer/layer_windows.go @@ -0,0 +1,109 @@ +package layer + +import ( + "errors" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" +) + +// GetLayerPath returns the path to a layer +func GetLayerPath(s Store, layer ChainID) (string, error) { + ls, ok := s.(*layerStore) + if !ok { + return "", errors.New("unsupported layer store") + } + ls.layerL.Lock() + defer ls.layerL.Unlock() + + rl, ok := ls.layerMap[layer] + if !ok { + return "", ErrLayerDoesNotExist + } + + path, err := ls.driver.Get(rl.cacheID, "") + if err != nil { + return "", err + } + + if err := ls.driver.Put(rl.cacheID); err != nil { + return "", err + } + + return path, nil +} + +// RWLayerMetadata returns the graph metadata for the provided +// mount name. +func RWLayerMetadata(s Store, name string) (map[string]string, error) { + ls, ok := s.(*layerStore) + if !ok { + return nil, errors.New("unsupported layer store") + } + ls.mountL.Lock() + defer ls.mountL.Unlock() + + ml, ok := ls.mounts[name] + if !ok { + return nil, errors.New("mount does not exist") + } + + return ls.driver.GetMetadata(ml.mountID) +} + +func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) { + var err error // this is used for cleanup in existingLayer case + diffID, err := digest.FromBytes([]byte(graphID)) + if err != nil { + return nil, err + } + + // Create new roLayer + layer := &roLayer{ + cacheID: graphID, + diffID: DiffID(diffID), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + size: size, + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + defer func() { + if err != nil { + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + layer.chainID = createChainIDFromParent("", layer.diffID) + + if !ls.driver.Exists(layer.cacheID) { + return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID) + } + if err = 
storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getAndRetainLayer(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} diff --git a/layer/migration.go b/layer/migration.go new file mode 100644 index 0000000000..db25ed9e94 --- /dev/null +++ b/layer/migration.go @@ -0,0 +1,251 @@ +package layer + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/ioutils" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +func (ls *layerStore) MountByGraphID(name string, graphID string, parent ChainID) (l RWLayer, err error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + if m.parent.chainID != parent { + return nil, errors.New("name conflict, mismatched parent") + } + if m.mountID != graphID { + return nil, errors.New("mount already exists") + } + + return m, nil + } + + if !ls.driver.Exists(graphID) { + return nil, errors.New("graph ID does not exist") + } + + var p *roLayer + if string(parent) != "" { + ls.layerL.Lock() + p = ls.getAndRetainLayer(parent) + ls.layerL.Unlock() + if p == nil { + return nil, ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // TODO: Ensure graphID has correct parent + + m = &mountedLayer{ + name: name, + parent: p, + mountID: graphID, + layerStore: ls, + } + + // Check for existing init layer + initID := fmt.Sprintf("%s-init", graphID) + if ls.driver.Exists(initID) { + m.initID = 
initID + } + + if err = ls.saveMount(m); err != nil { + return nil, err + } + + // TODO: provide a mount label + if err = ls.mount(m, ""); err != nil { + return nil, err + } + + return m, nil +} + +func (ls *layerStore) migrateLayer(tx MetadataTransaction, tarDataFile string, layer *roLayer) error { + var ar io.Reader + var tdf *os.File + var err error + if tarDataFile != "" { + tdf, err = os.Open(tarDataFile) + if err != nil { + if !os.IsNotExist(err) { + return err + } + tdf = nil + } + defer tdf.Close() + } + if tdf != nil { + tsw, err := tx.TarSplitWriter() + if err != nil { + return err + } + + defer tsw.Close() + + uncompressed, err := gzip.NewReader(tdf) + if err != nil { + return err + } + defer uncompressed.Close() + + tr := io.TeeReader(uncompressed, tsw) + trc := ioutils.NewReadCloserWrapper(tr, uncompressed.Close) + + ar, err = ls.assembleTar(layer.cacheID, trc, &layer.size) + if err != nil { + return err + } + + } else { + var graphParent string + if layer.parent != nil { + graphParent = layer.parent.cacheID + } + archiver, err := ls.driver.Diff(layer.cacheID, graphParent) + if err != nil { + return err + } + defer archiver.Close() + + tsw, err := tx.TarSplitWriter() + if err != nil { + return err + } + metaPacker := storage.NewJSONPacker(tsw) + packerCounter := &packSizeCounter{metaPacker, &layer.size} + defer tsw.Close() + + ar, err = asm.NewInputTarStream(archiver, packerCounter, nil) + if err != nil { + return err + } + } + + digester := digest.Canonical.New() + _, err = io.Copy(digester.Hash(), ar) + if err != nil { + return err + } + + layer.diffID = DiffID(digester.Digest()) + + return nil +} + +func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, tarDataFile string) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an error returned + // to the caller (already exists). 
+ var err error + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: graphID, + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + if err = ls.migrateLayer(tx, tarDataFile, layer); err != nil { + return nil, err + } + + layer.chainID = createChainIDFromParent(parent, layer.diffID) + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getAndRetainLayer(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +type unpackSizeCounter struct { + unpacker storage.Unpacker + size *int64 +} + +func (u *unpackSizeCounter) Next() (*storage.Entry, error) { + e, err := u.unpacker.Next() + if err == nil && u.size != nil { + *u.size += e.Size + } + return e, err +} + +type packSizeCounter struct { + packer storage.Packer + size *int64 +} + +func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { + n, err := p.packer.AddEntry(e) + if err == nil && p.size != nil { + *p.size += e.Size + } + return n, err +} diff --git a/layer/migration_test.go 
b/layer/migration_test.go new file mode 100644 index 0000000000..11614ffde0 --- /dev/null +++ b/layer/migration_test.go @@ -0,0 +1,385 @@ +package layer + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +func writeTarSplitFile(name string, tarContent []byte) error { + f, err := os.OpenFile(name, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + fz := gzip.NewWriter(f) + + metaPacker := storage.NewJSONPacker(fz) + defer fz.Close() + + rdr, err := asm.NewInputTarStream(bytes.NewReader(tarContent), metaPacker, nil) + if err != nil { + return err + } + + if _, err := io.Copy(ioutil.Discard, rdr); err != nil { + return err + } + + return nil +} + +func TestLayerMigration(t *testing.T) { + td, err := ioutil.TempDir("", "migration-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + layer1Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layer2Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + } + + tar1, err := tarFromFiles(layer1Files...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(layer2Files...) 
+ if err != nil { + t.Fatal(err) + } + + graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) + if err != nil { + t.Fatal(err) + } + + graphID1 := stringid.GenerateRandomID() + if err := graph.Create(graphID1, "", ""); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(graphID1, "", archive.Reader(bytes.NewReader(tar1))); err != nil { + t.Fatal(err) + } + + tf1 := filepath.Join(td, "tar1.json.gz") + if err := writeTarSplitFile(tf1, tar1); err != nil { + t.Fatal(err) + } + + fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) + if err != nil { + t.Fatal(err) + } + ls, err := NewStore(fms, graph) + if err != nil { + t.Fatal(err) + } + + layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", tf1) + if err != nil { + t.Fatal(err) + } + + layer1b, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer1a, layer1b) + + // Attempt register, should be same + layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) + if err != nil { + t.Fatal(err) + } + + graphID2 := stringid.GenerateRandomID() + if err := graph.Create(graphID2, graphID1, ""); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(graphID2, graphID1, archive.Reader(bytes.NewReader(tar2))); err != nil { + t.Fatal(err) + } + + tf2 := filepath.Join(td, "tar2.json.gz") + if err := writeTarSplitFile(tf2, tar2); err != nil { + t.Fatal(err) + } + + layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), tf2) + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer2a, layer2b) + + if metadata, err := ls.Release(layer2a); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Unexpected layer removal after first release: %#v", metadata) + } + + metadata, err := ls.Release(layer2b) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, createMetadata(layer2a)) +} + +func tarFromFilesInGraph(graph graphdriver.Driver, graphID, parentID 
string, files ...FileApplier) ([]byte, error) { + t, err := tarFromFiles(files...) + if err != nil { + return nil, err + } + + if err := graph.Create(graphID, parentID, ""); err != nil { + return nil, err + } + if _, err := graph.ApplyDiff(graphID, parentID, archive.Reader(bytes.NewReader(t))); err != nil { + return nil, err + } + + ar, err := graph.Diff(graphID, parentID) + if err != nil { + return nil, err + } + defer ar.Close() + + return ioutil.ReadAll(ar) +} + +func TestLayerMigrationNoTarsplit(t *testing.T) { + td, err := ioutil.TempDir("", "migration-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + layer1Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layer2Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + } + + graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) + if err != nil { + t.Fatal(err) + } + graphID1 := stringid.GenerateRandomID() + graphID2 := stringid.GenerateRandomID() + + tar1, err := tarFromFilesInGraph(graph, graphID1, "", layer1Files...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFilesInGraph(graph, graphID2, graphID1, layer2Files...) 
+ if err != nil { + t.Fatal(err) + } + + fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) + if err != nil { + t.Fatal(err) + } + ls, err := NewStore(fms, graph) + if err != nil { + t.Fatal(err) + } + + layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", "") + if err != nil { + t.Fatal(err) + } + + layer1b, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer1a, layer1b) + + // Attempt register, should be same + layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), "") + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer2a, layer2b) + + if metadata, err := ls.Release(layer2a); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Unexpected layer removal after first release: %#v", metadata) + } + + metadata, err := ls.Release(layer2b) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, createMetadata(layer2a)) +} + +func TestMountMigration(t *testing.T) { + ls, cleanup := newTestStore(t) + defer cleanup() + + baseFiles := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + initFiles := []FileApplier{ + newTestFile("/etc/hosts", []byte{}, 0644), + newTestFile("/etc/resolv.conf", []byte{}, 0644), + } + mountFiles := []FileApplier{ + newTestFile("/etc/hosts", []byte("localhost 127.0.0.1"), 0644), + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + newTestFile("/root/testfile1.txt", []byte("nothing valuable"), 0644), + } + + initTar, err := tarFromFiles(initFiles...) + if err != nil { + t.Fatal(err) + } + + mountTar, err := tarFromFiles(mountFiles...) 
+ if err != nil { + t.Fatal(err) + } + + graph := ls.(*layerStore).driver + + layer1, err := createLayer(ls, "", initWithFiles(baseFiles...)) + if err != nil { + t.Fatal(err) + } + + graphID1 := layer1.(*referencedCacheLayer).cacheID + + containerID := stringid.GenerateRandomID() + containerInit := fmt.Sprintf("%s-init", containerID) + + if err := graph.Create(containerInit, graphID1, ""); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(containerInit, graphID1, archive.Reader(bytes.NewReader(initTar))); err != nil { + t.Fatal(err) + } + + if err := graph.Create(containerID, containerInit, ""); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(containerID, containerInit, archive.Reader(bytes.NewReader(mountTar))); err != nil { + t.Fatal(err) + } + + rwLayer1, err := ls.(*layerStore).MountByGraphID("migration-mount", containerID, layer1.ChainID()) + if err != nil { + t.Fatal(err) + } + + changes, err := ls.Changes("migration-mount") + if err != nil { + t.Fatal(err) + } + + if expected := 5; len(changes) != expected { + t.Logf("Changes %#v", changes) + t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) + } + + sortChanges(changes) + + assertChange(t, changes[0], archive.Change{ + Path: "/etc", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[1], archive.Change{ + Path: "/etc/hosts", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[2], archive.Change{ + Path: "/root", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[3], archive.Change{ + Path: "/root/.bashrc", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[4], archive.Change{ + Path: "/root/testfile1.txt", + Kind: archive.ChangeAdd, + }) + + if expectedCount := 1; rwLayer1.(*mountedLayer).activityCount != expectedCount { + t.Fatalf("Wrong activity count %d, expected %d", rwLayer1.(*mountedLayer).activityCount, expectedCount) + } + + rwLayer2, err := ls.Mount("migration-mount", layer1.ChainID(), "", nil) + if err != 
nil { + t.Fatal(err) + } + + if rwLayer1 != rwLayer2 { + t.Fatalf("Wrong rwlayer %v, expected %v", rwLayer2, rwLayer1) + } + + if expectedCount := 2; rwLayer2.(*mountedLayer).activityCount != expectedCount { + t.Fatalf("Wrong activity count %d, expected %d", rwLayer2.(*mountedLayer).activityCount, expectedCount) + } + + if metadata, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Expected no layers to be deleted, deleted %#v", metadata) + } + + if err := ls.Unmount("migration-mount"); err != nil { + t.Fatal(err) + } + if _, err := ls.DeleteMount("migration-mount"); err == nil { + t.Fatal("Expected error deleting active mount") + } + if err := ls.Unmount("migration-mount"); err != nil { + t.Fatal(err) + } + metadata, err := ls.DeleteMount("migration-mount") + if err != nil { + t.Fatal(err) + } + if len(metadata) == 0 { + t.Fatal("Expected base layer to be deleted when deleting mount") + } + + assertMetadata(t, metadata, createMetadata(layer1)) +} diff --git a/layer/mount_test.go b/layer/mount_test.go new file mode 100644 index 0000000000..195f81193d --- /dev/null +++ b/layer/mount_test.go @@ -0,0 +1,217 @@ +package layer + +import ( + "io/ioutil" + "os" + "path/filepath" + "sort" + "testing" + + "github.com/docker/docker/pkg/archive" +) + +func TestMountInit(t *testing.T) { + ls, cleanup := newTestStore(t) + defer cleanup() + + basefile := newTestFile("testfile.txt", []byte("base data!"), 0644) + initfile := newTestFile("testfile.txt", []byte("init data!"), 0777) + + li := initWithFiles(basefile) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return initfile.ApplyFile(root) + } + + m, err := ls.Mount("fun-mount", layer.ChainID(), "", mountInit) + if err != nil { + t.Fatal(err) + } + + path, err := m.Path() + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(filepath.Join(path, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + 
defer f.Close() + + fi, err := f.Stat() + if err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + + if expected := "init data!"; string(b) != expected { + t.Fatalf("Unexpected test file contents %q, expected %q", string(b), expected) + } + + if fi.Mode().Perm() != 0777 { + t.Fatalf("Unexpected filemode %o, expecting %o", fi.Mode().Perm(), 0777) + } +} + +func TestMountSize(t *testing.T) { + ls, cleanup := newTestStore(t) + defer cleanup() + + content1 := []byte("Base contents") + content2 := []byte("Mutable contents") + contentInit := []byte("why am I excluded from the size ☹") + + li := initWithFiles(newTestFile("file1", content1, 0644)) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return newTestFile("file-init", contentInit, 0777).ApplyFile(root) + } + + m, err := ls.Mount("mount-size", layer.ChainID(), "", mountInit) + if err != nil { + t.Fatal(err) + } + + path, err := m.Path() + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil { + t.Fatal(err) + } + + mountSize, err := m.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content2); int(mountSize) != expected { + t.Fatalf("Unexpected mount size %d, expected %d", int(mountSize), expected) + } +} + +func TestMountChanges(t *testing.T) { + ls, cleanup := newTestStore(t) + defer cleanup() + + basefiles := []FileApplier{ + newTestFile("testfile1.txt", []byte("base data!"), 0644), + newTestFile("testfile2.txt", []byte("base data!"), 0644), + newTestFile("testfile3.txt", []byte("base data!"), 0644), + } + initfile := newTestFile("testfile1.txt", []byte("init data!"), 0777) + + li := initWithFiles(basefiles...) 
+ layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return initfile.ApplyFile(root) + } + + m, err := ls.Mount("mount-changes", layer.ChainID(), "", mountInit) + if err != nil { + t.Fatal(err) + } + + path, err := m.Path() + if err != nil { + t.Fatal(err) + } + + if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil { + t.Fatal(err) + } + + if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil { + t.Fatal(err) + } + + if err := os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil { + t.Fatal(err) + } + + changes, err := ls.Changes("mount-changes") + if err != nil { + t.Fatal(err) + } + + if expected := 4; len(changes) != expected { + t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) + } + + sortChanges(changes) + + assertChange(t, changes[0], archive.Change{ + Path: "/testfile1.txt", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[1], archive.Change{ + Path: "/testfile2.txt", + Kind: archive.ChangeDelete, + }) + assertChange(t, changes[2], archive.Change{ + Path: "/testfile3.txt", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[3], archive.Change{ + Path: "/testfile4.txt", + Kind: archive.ChangeAdd, + }) +} + +func assertChange(t *testing.T, actual, expected archive.Change) { + if actual.Path != expected.Path { + t.Fatalf("Unexpected change path %s, expected %s", actual.Path, expected.Path) + } + if actual.Kind != expected.Kind { + t.Fatalf("Unexpected change type %s, expected %s", actual.Kind, expected.Kind) + } +} + +func sortChanges(changes []archive.Change) { + cs := &changeSorter{ + changes: changes, + } + sort.Sort(cs) +} + +type 
changeSorter struct { + changes []archive.Change +} + +func (cs *changeSorter) Len() int { + return len(cs.changes) +} + +func (cs *changeSorter) Swap(i, j int) { + cs.changes[i], cs.changes[j] = cs.changes[j], cs.changes[i] +} + +func (cs *changeSorter) Less(i, j int) bool { + return cs.changes[i].Path < cs.changes[j].Path +} diff --git a/layer/mounted_layer.go b/layer/mounted_layer.go new file mode 100644 index 0000000000..a1eaa25c4c --- /dev/null +++ b/layer/mounted_layer.go @@ -0,0 +1,64 @@ +package layer + +import "io" + +type mountedLayer struct { + name string + mountID string + initID string + parent *roLayer + path string + layerStore *layerStore + activityCount int +} + +func (ml *mountedLayer) cacheParent() string { + if ml.initID != "" { + return ml.initID + } + if ml.parent != nil { + return ml.parent.cacheID + } + return "" +} + +func (ml *mountedLayer) TarStream() (io.Reader, error) { + archiver, err := ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) + if err != nil { + return nil, err + } + return autoClosingReader{archiver}, nil +} + +func (ml *mountedLayer) Path() (string, error) { + if ml.path == "" { + return "", ErrNotMounted + } + return ml.path, nil +} + +func (ml *mountedLayer) Parent() Layer { + if ml.parent != nil { + return ml.parent + } + + // Return a nil interface instead of an interface wrapping a nil + // pointer. 
+ return nil +} + +func (ml *mountedLayer) Size() (int64, error) { + return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) +} + +type autoClosingReader struct { + source io.ReadCloser +} + +func (r autoClosingReader) Read(p []byte) (n int, err error) { + n, err = r.source.Read(p) + if err != nil { + r.source.Close() + } + return +} diff --git a/layer/ro_layer.go b/layer/ro_layer.go new file mode 100644 index 0000000000..3a547ca013 --- /dev/null +++ b/layer/ro_layer.go @@ -0,0 +1,110 @@ +package layer + +import "io" + +type roLayer struct { + chainID ChainID + diffID DiffID + parent *roLayer + cacheID string + size int64 + layerStore *layerStore + + referenceCount int + references map[Layer]struct{} +} + +func (rl *roLayer) TarStream() (io.Reader, error) { + r, err := rl.layerStore.store.TarSplitReader(rl.chainID) + if err != nil { + return nil, err + } + + return rl.layerStore.assembleTar(rl.cacheID, r, nil) +} + +func (rl *roLayer) ChainID() ChainID { + return rl.chainID +} + +func (rl *roLayer) DiffID() DiffID { + return rl.diffID +} + +func (rl *roLayer) Parent() Layer { + if rl.parent == nil { + return nil + } + return rl.parent +} + +func (rl *roLayer) Size() (size int64, err error) { + if rl.parent != nil { + size, err = rl.parent.Size() + if err != nil { + return + } + } + + return size + rl.size, nil +} + +func (rl *roLayer) DiffSize() (size int64, err error) { + return rl.size, nil +} + +func (rl *roLayer) Metadata() (map[string]string, error) { + return rl.layerStore.driver.GetMetadata(rl.cacheID) +} + +type referencedCacheLayer struct { + *roLayer +} + +func (rl *roLayer) getReference() Layer { + ref := &referencedCacheLayer{ + roLayer: rl, + } + rl.references[ref] = struct{}{} + + return ref +} + +func (rl *roLayer) hasReference(ref Layer) bool { + _, ok := rl.references[ref] + return ok +} + +func (rl *roLayer) hasReferences() bool { + return len(rl.references) > 0 +} + +func (rl *roLayer) deleteReference(ref Layer) { + 
delete(rl.references, ref) +} + +func (rl *roLayer) depth() int { + if rl.parent == nil { + return 1 + } + return rl.parent.depth() + 1 +} + +func storeLayer(tx MetadataTransaction, layer *roLayer) error { + if err := tx.SetDiffID(layer.diffID); err != nil { + return err + } + if err := tx.SetSize(layer.size); err != nil { + return err + } + if err := tx.SetCacheID(layer.cacheID); err != nil { + return err + } + if layer.parent != nil { + if err := tx.SetParent(layer.parent.chainID); err != nil { + return err + } + } + + return nil +} From 7de380c5c673411639d84e07c29830eb81cb1c8d Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 18 Nov 2015 14:16:21 -0800 Subject: [PATCH 2/7] Add tag store The tag store associates tags and digests with image IDs. This functionality used to be part of graph package. This commit splits it off into a self-contained package with a simple interface. Signed-off-by: Aaron Lehmann --- tag/store.go | 282 +++++++++++++++++++++++++++++++++++++++ tag/store_test.go | 328 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 610 insertions(+) create mode 100644 tag/store.go create mode 100644 tag/store_test.go diff --git a/tag/store.go b/tag/store.go new file mode 100644 index 0000000000..605f3ea8d6 --- /dev/null +++ b/tag/store.go @@ -0,0 +1,282 @@ +package tag + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" +) + +// DefaultTag defines the default tag used when performing images related actions and no tag string is specified +const DefaultTag = "latest" + +var ( + // ErrDoesNotExist is returned if a reference is not found in the + // store. + ErrDoesNotExist = errors.New("reference does not exist") +) + +// An Association is a tuple associating a reference with an image ID. 
+type Association struct { + Ref reference.Named + ImageID image.ID +} + +// Store provides the set of methods which can operate on a tag store. +type Store interface { + References(id image.ID) []reference.Named + ReferencesByName(ref reference.Named) []Association + Add(ref reference.Named, id image.ID, force bool) error + Delete(ref reference.Named) (bool, error) + Get(ref reference.Named) (image.ID, error) +} + +type store struct { + mu sync.RWMutex + // jsonPath is the path to the file where the serialized tag data is + // stored. + jsonPath string + // Repositories is a map of repositories, indexed by name. + Repositories map[string]repository + // referencesByIDCache is a cache of references indexed by ID, to speed + // up References. + referencesByIDCache map[image.ID]map[string]reference.Named +} + +// Repository maps tags to image IDs. The key is a stringified Reference, +// including the repository name. +type repository map[string]image.ID + +func defaultTagIfNameOnly(ref reference.Named) reference.Named { + switch ref.(type) { + case reference.Tagged: + return ref + case reference.Digested: + return ref + default: + // Should never fail + ref, _ = reference.WithTag(ref, DefaultTag) + return ref + } +} + +// NewTagStore creates a new tag store, tied to a file path where the set of +// tags is serialized in JSON format. +func NewTagStore(jsonPath string) (Store, error) { + abspath, err := filepath.Abs(jsonPath) + if err != nil { + return nil, err + } + + store := &store{ + jsonPath: abspath, + Repositories: make(map[string]repository), + referencesByIDCache: make(map[image.ID]map[string]reference.Named), + } + // Load the json file if it exists, otherwise create it. + if err := store.reload(); os.IsNotExist(err) { + if err := store.save(); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + return store, nil +} + +// Add adds a tag or digest to the store.
If force is set to true, existing +// references can be overwritten. This only works for tags, not digests. +func (store *store) Add(ref reference.Named, id image.ID, force bool) error { + ref = defaultTagIfNameOnly(ref) + + store.mu.Lock() + defer store.mu.Unlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists || repository == nil { + repository = make(map[string]image.ID) + store.Repositories[ref.Name()] = repository + } + + refStr := ref.String() + oldID, exists := repository[refStr] + + if exists { + // force only works for tags + if digested, isDigest := ref.(reference.Digested); isDigest { + return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String()) + } + + if !force { + return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", ref.String(), oldID.String()) + } + + if store.referencesByIDCache[oldID] != nil { + delete(store.referencesByIDCache[oldID], refStr) + if len(store.referencesByIDCache[oldID]) == 0 { + delete(store.referencesByIDCache, oldID) + } + } + } + + repository[refStr] = id + if store.referencesByIDCache[id] == nil { + store.referencesByIDCache[id] = make(map[string]reference.Named) + } + store.referencesByIDCache[id][refStr] = ref + + return store.save() +} + +// Delete deletes a reference from the store. It returns true if a deletion +// happened, or false otherwise. 
+func (store *store) Delete(ref reference.Named) (bool, error) { + ref = defaultTagIfNameOnly(ref) + + store.mu.Lock() + defer store.mu.Unlock() + + repoName := ref.Name() + + repository, exists := store.Repositories[repoName] + if !exists { + return false, ErrDoesNotExist + } + + refStr := ref.String() + if id, exists := repository[refStr]; exists { + delete(repository, refStr) + if len(repository) == 0 { + delete(store.Repositories, repoName) + } + if store.referencesByIDCache[id] != nil { + delete(store.referencesByIDCache[id], refStr) + if len(store.referencesByIDCache[id]) == 0 { + delete(store.referencesByIDCache, id) + } + } + return true, store.save() + } + + return false, ErrDoesNotExist +} + +// Get retrieves an item from the store by reference. +func (store *store) Get(ref reference.Named) (image.ID, error) { + ref = defaultTagIfNameOnly(ref) + + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists || repository == nil { + return "", ErrDoesNotExist + } + + id, exists := repository[ref.String()] + if !exists { + return "", ErrDoesNotExist + } + + return id, nil +} + +// References returns a slice of references to the given image ID. The slice +// will be nil if there are no references to this image ID. +func (store *store) References(id image.ID) []reference.Named { + store.mu.RLock() + defer store.mu.RUnlock() + + // Convert the internal map to an array for two reasons: + // 1) We must not return a mutable reference. + // 2) It would be ugly to expose the extraneous map keys to callers. + + var references []reference.Named + for _, ref := range store.referencesByIDCache[id] { + references = append(references, ref) + } + + return references +} + +// ReferencesByName returns the references for a given repository name. +// If there are no references known for this repository name, +// ReferencesByName returns nil. 
+func (store *store) ReferencesByName(ref reference.Named) []Association { + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists { + return nil + } + + var associations []Association + for refStr, refID := range repository { + ref, err := reference.ParseNamed(refStr) + if err != nil { + // Should never happen + return nil + } + associations = append(associations, + Association{ + Ref: ref, + ImageID: refID, + }) + } + + return associations +} + +func (store *store) save() error { + // Store the json + jsonData, err := json.Marshal(store) + if err != nil { + return err + } + + tempFilePath := store.jsonPath + ".tmp" + + if err := ioutil.WriteFile(tempFilePath, jsonData, 0600); err != nil { + return err + } + + if err := os.Rename(tempFilePath, store.jsonPath); err != nil { + return err + } + + return nil +} + +func (store *store) reload() error { + f, err := os.Open(store.jsonPath) + if err != nil { + return err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&store); err != nil { + return err + } + + for _, repository := range store.Repositories { + for refStr, refID := range repository { + ref, err := reference.ParseNamed(refStr) + if err != nil { + // Should never happen + continue + } + if store.referencesByIDCache[refID] == nil { + store.referencesByIDCache[refID] = make(map[string]reference.Named) + } + store.referencesByIDCache[refID][refStr] = ref + } + } + + return nil +} diff --git a/tag/store_test.go b/tag/store_test.go new file mode 100644 index 0000000000..3a61289091 --- /dev/null +++ b/tag/store_test.go @@ -0,0 +1,328 @@ +package tag + +import ( + "bytes" + "io/ioutil" + "os" + "sort" + "strings" + "testing" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" +) + +var ( + saveLoadTestCases = map[string]image.ID{ + "registry:5000/foobar:HEAD": "sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6", + 
"registry:5000/foobar:alternate": "sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793", + "registry:5000/foobar:latest": "sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b", + "registry:5000/foobar:master": "sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc", + "jess/hollywood:latest": "sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe", + "registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6": "sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c", + "busybox:latest": "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + } + + marshalledSaveLoadTestCases = []byte(`{"Repositories":{"busybox":{"busybox:latest":"sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"},"jess/hollywood":{"jess/hollywood:latest":"sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe"},"registry":{"registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6":"sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c"},"registry:5000/foobar":{"registry:5000/foobar:HEAD":"sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6","registry:5000/foobar:alternate":"sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793","registry:5000/foobar:latest":"sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b","registry:5000/foobar:master":"sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc"}}}`) +) + +func TestLoad(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + defer os.RemoveAll(jsonFile.Name()) + + // Write canned json to the temp file + _, err = jsonFile.Write(marshalledSaveLoadTestCases) + if err != nil { + t.Fatalf("error writing to temp file: %v", err) + } + jsonFile.Close() + + store, err := 
NewTagStore(jsonFile.Name()) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + for refStr, expectedID := range saveLoadTestCases { + ref, err := reference.ParseNamed(refStr) + if err != nil { + t.Fatalf("failed to parse reference: %v", err) + } + id, err := store.Get(ref) + if err != nil { + t.Fatalf("could not find reference %s: %v", refStr, err) + } + if id != expectedID { + t.Fatalf("expected %s - got %s", expectedID, id) + } + } +} + +func TestSave(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + _, err = jsonFile.Write([]byte(`{}`)) + jsonFile.Close() + defer os.RemoveAll(jsonFile.Name()) + + store, err := NewTagStore(jsonFile.Name()) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + for refStr, id := range saveLoadTestCases { + ref, err := reference.ParseNamed(refStr) + if err != nil { + t.Fatalf("failed to parse reference: %v", err) + } + err = store.Add(ref, id, false) + if err != nil { + t.Fatalf("could not add reference %s: %v", refStr, err) + } + } + + jsonBytes, err := ioutil.ReadFile(jsonFile.Name()) + if err != nil { + t.Fatalf("could not read json file: %v", err) + } + + if !bytes.Equal(jsonBytes, marshalledSaveLoadTestCases) { + t.Fatalf("save output did not match expectations\nexpected:\n%s\ngot:\n%s", marshalledSaveLoadTestCases, jsonBytes) + } +} + +type LexicalRefs []reference.Named + +func (a LexicalRefs) Len() int { return len(a) } +func (a LexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a LexicalRefs) Less(i, j int) bool { return a[i].String() < a[j].String() } + +type LexicalAssociations []Association + +func (a LexicalAssociations) Len() int { return len(a) } +func (a LexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a LexicalAssociations) Less(i, j int) bool { return a[i].Ref.String() < a[j].Ref.String() } + +func TestAddDeleteGet(t *testing.T) { + jsonFile, err 
:= ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + _, err = jsonFile.Write([]byte(`{}`)) + jsonFile.Close() + defer os.RemoveAll(jsonFile.Name()) + + store, err := NewTagStore(jsonFile.Name()) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + testImageID1 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9c") + testImageID2 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9d") + testImageID3 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9e") + + // Try adding a reference with no tag or digest + nameOnly, err := reference.WithName("username/repo") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.Add(nameOnly, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + // Add a few references + ref1, err := reference.ParseNamed("username/repo1:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.Add(ref1, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref2, err := reference.ParseNamed("username/repo1:old") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.Add(ref2, testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref3, err := reference.ParseNamed("username/repo1:alias") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.Add(ref3, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref4, err := reference.ParseNamed("username/repo2:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.Add(ref4, testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref5, err := 
reference.ParseNamed("username/repo3@sha256:58153dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.Add(ref5, testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + // Attempt to overwrite with force == false + if err = store.Add(ref4, testImageID3, false); err == nil || !strings.HasPrefix(err.Error(), "Conflict:") { + t.Fatalf("did not get expected error on overwrite attempt - got %v", err) + } + // Repeat to overwrite with force == true + if err = store.Add(ref4, testImageID3, true); err != nil { + t.Fatalf("failed to force tag overwrite: %v", err) + } + + // Check references so far + id, err := store.Get(nameOnly) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID1 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) + } + + id, err = store.Get(ref1) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID1 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) + } + + id, err = store.Get(ref2) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID2 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID2.String()) + } + + id, err = store.Get(ref3) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID1 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) + } + + id, err = store.Get(ref4) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID3 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String()) + } + + id, err = store.Get(ref5) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID2 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID2.String()) + } + + // Get should return
ErrDoesNotExist for a nonexistent repo + nonExistRepo, err := reference.ParseNamed("username/nonexistrepo:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if _, err = store.Get(nonExistRepo); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + + // Get should return ErrDoesNotExist for a nonexistent tag + nonExistTag, err := reference.ParseNamed("username/repo1:nonexist") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if _, err = store.Get(nonExistTag); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + + // Check References + refs := store.References(testImageID1) + sort.Sort(LexicalRefs(refs)) + if len(refs) != 3 { + t.Fatal("unexpected number of references") + } + if refs[0].String() != ref3.String() { + t.Fatalf("unexpected reference: %v", refs[0].String()) + } + if refs[1].String() != ref1.String() { + t.Fatalf("unexpected reference: %v", refs[1].String()) + } + if refs[2].String() != nameOnly.String()+":latest" { + t.Fatalf("unexpected reference: %v", refs[2].String()) + } + + // Check ReferencesByName + repoName, err := reference.WithName("username/repo1") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + associations := store.ReferencesByName(repoName) + sort.Sort(LexicalAssociations(associations)) + if len(associations) != 3 { + t.Fatal("unexpected number of associations") + } + if associations[0].Ref.String() != ref3.String() { + t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) + } + if associations[0].ImageID != testImageID1 { + t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) + } + if associations[1].Ref.String() != ref1.String() { + t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) + } + if associations[1].ImageID != testImageID1 { + t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) + } + if associations[2].Ref.String() != ref2.String() { + 
t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) + } + if associations[2].ImageID != testImageID2 { + t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) + } + + // Delete should return ErrDoesNotExist for a nonexistent repo + if _, err = store.Delete(nonExistRepo); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Delete") + } + + // Delete should return ErrDoesNotExist for a nonexistent tag + if _, err = store.Delete(nonExistTag); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Delete") + } + + // Delete a few references + if deleted, err := store.Delete(ref1); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(ref1); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + if deleted, err := store.Delete(ref5); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(ref5); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + if deleted, err := store.Delete(nameOnly); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(nameOnly); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } +} From 01ba0a935b9227583ec05a96e85273cc2276bb93 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Wed, 18 Nov 2015 14:18:07 -0800 Subject: [PATCH 3/7] Add image store The image store abstracts image handling. It keeps track of the available images, and makes it possible to delete existing images or register new ones. The image store holds references to the underlying layers for each image. The image/v1 package provides compatibility functions for interoperating with older (non-content-addressable) image structures. 
Signed-off-by: Tonis Tiigi --- image/fixtures/post1.9/expected_computed_id | 1 - image/fixtures/post1.9/expected_config | 1 - image/fixtures/post1.9/layer_id | 1 - image/fixtures/post1.9/parent_id | 1 - image/fixtures/post1.9/v1compatibility | 1 - image/fixtures/pre1.9/expected_computed_id | 1 - image/fixtures/pre1.9/expected_config | 2 - image/fixtures/pre1.9/layer_id | 1 - image/fixtures/pre1.9/parent_id | 1 - image/fixtures/pre1.9/v1compatibility | 1 - image/fs.go | 192 ++++++++++ image/fs_test.go | 391 ++++++++++++++++++++ image/image.go | 163 ++++---- image/image_test.go | 86 +++-- image/rootfs.go | 8 + image/rootfs_unix.go | 23 ++ image/rootfs_windows.go | 37 ++ image/store.go | 286 ++++++++++++++ image/store_test.go | 205 ++++++++++ image/tarexport/load.go | 284 ++++++++++++++ image/tarexport/save.go | 303 +++++++++++++++ image/tarexport/tarexport.go | 36 ++ image/v1/imagev1.go | 148 ++++++++ 23 files changed, 2029 insertions(+), 144 deletions(-) delete mode 100644 image/fixtures/post1.9/expected_computed_id delete mode 100644 image/fixtures/post1.9/expected_config delete mode 100644 image/fixtures/post1.9/layer_id delete mode 100644 image/fixtures/post1.9/parent_id delete mode 100644 image/fixtures/post1.9/v1compatibility delete mode 100644 image/fixtures/pre1.9/expected_computed_id delete mode 100644 image/fixtures/pre1.9/expected_config delete mode 100644 image/fixtures/pre1.9/layer_id delete mode 100644 image/fixtures/pre1.9/parent_id delete mode 100644 image/fixtures/pre1.9/v1compatibility create mode 100644 image/fs.go create mode 100644 image/fs_test.go create mode 100644 image/rootfs.go create mode 100644 image/rootfs_unix.go create mode 100644 image/rootfs_windows.go create mode 100644 image/store.go create mode 100644 image/store_test.go create mode 100644 image/tarexport/load.go create mode 100644 image/tarexport/save.go create mode 100644 image/tarexport/tarexport.go create mode 100644 image/v1/imagev1.go diff --git 
a/image/fixtures/post1.9/expected_computed_id b/image/fixtures/post1.9/expected_computed_id deleted file mode 100644 index cba6d81f4e..0000000000 --- a/image/fixtures/post1.9/expected_computed_id +++ /dev/null @@ -1 +0,0 @@ -sha256:f2722a8ec6926e02fa9f2674072cbc2a25cf0f449f27350f613cd843b02c9105 diff --git a/image/fixtures/post1.9/expected_config b/image/fixtures/post1.9/expected_config deleted file mode 100644 index ae27bdd429..0000000000 --- a/image/fixtures/post1.9/expected_config +++ /dev/null @@ -1 +0,0 @@ -{"architecture":"amd64","config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":null,"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"fb1f7270da9519308361b99dc8e0d30f12c24dfd28537c2337ece995ac853a16","container_config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":["/bin/sh","-c","#(nop) ADD file:11998b2a4d664a75cd0c3f4e4cb1837434e0f997ba157a0ac1d3c68a07aa2f4f in /"],"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-09-08T21:30:30.807853054Z","docker_version":"1.9.0-dev","layer_id":"sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a","os":"linux","parent_id":"sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02"} diff --git a/image/fixtures/post1.9/layer_id b/image/fixtures/post1.9/layer_id deleted file mode 100644 index ded2db28e7..0000000000 --- a/image/fixtures/post1.9/layer_id +++ /dev/null @@ -1 +0,0 @@ 
-sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a diff --git a/image/fixtures/post1.9/parent_id b/image/fixtures/post1.9/parent_id deleted file mode 100644 index 7d524f80c2..0000000000 --- a/image/fixtures/post1.9/parent_id +++ /dev/null @@ -1 +0,0 @@ -sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02 diff --git a/image/fixtures/post1.9/v1compatibility b/image/fixtures/post1.9/v1compatibility deleted file mode 100644 index d6697c2b68..0000000000 --- a/image/fixtures/post1.9/v1compatibility +++ /dev/null @@ -1 +0,0 @@ -{"id":"8dfb96b5d09e6cf6f376d81f1e2770ee5ede309f9bd9e079688c9782649ab326","parent":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","created":"2015-09-08T21:30:30.807853054Z","container":"fb1f7270da9519308361b99dc8e0d30f12c24dfd28537c2337ece995ac853a16","container_config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":["/bin/sh","-c","#(nop) ADD file:11998b2a4d664a75cd0c3f4e4cb1837434e0f997ba157a0ac1d3c68a07aa2f4f in /"],"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"docker_version":"1.9.0-dev","config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":null,"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux"} diff --git a/image/fixtures/pre1.9/expected_computed_id b/image/fixtures/pre1.9/expected_computed_id deleted file mode 100644 index c27b0b6a20..0000000000 --- a/image/fixtures/pre1.9/expected_computed_id +++ /dev/null @@ -1 +0,0 @@ 
-sha256:fd6ebfedda8ea140a9380767e15bd32c6e899303cfe34bc4580c931f2f816f89 diff --git a/image/fixtures/pre1.9/expected_config b/image/fixtures/pre1.9/expected_config deleted file mode 100644 index 121efe1fe6..0000000000 --- a/image/fixtures/pre1.9/expected_config +++ /dev/null @@ -1,2 +0,0 @@ -{"architecture":"amd64","config":{"Hostname":"03797203757d","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Cmd":null,"Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Volumes":null,"WorkingDir":"/go","Entrypoint":["/go/bin/dnsdock"],"OnBuild":[],"Labels":{}},"container":"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253","container_config":{"Hostname":"03797203757d","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Cmd":["/bin/sh","-c","#(nop) ENTRYPOINT [\"/go/bin/dnsdock\"]"],"Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Volumes":null,"WorkingDir":"/go","Entrypoint":["/go/bin/dnsdock"],"OnBuild":[],"Labels":{}},"created":"2015-08-19T16:49:11.368300679Z","docker_version":"1.6.2","layer_id":"sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a","os":"linux","parent_id":"sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02"} - diff --git a/image/fixtures/pre1.9/layer_id b/image/fixtures/pre1.9/layer_id deleted file mode 100644 index ded2db28e7..0000000000 --- a/image/fixtures/pre1.9/layer_id +++ /dev/null @@ -1 +0,0 @@ -sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a diff --git 
a/image/fixtures/pre1.9/parent_id b/image/fixtures/pre1.9/parent_id deleted file mode 100644 index 7d524f80c2..0000000000 --- a/image/fixtures/pre1.9/parent_id +++ /dev/null @@ -1 +0,0 @@ -sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02 diff --git a/image/fixtures/pre1.9/v1compatibility b/image/fixtures/pre1.9/v1compatibility deleted file mode 100644 index af96e82506..0000000000 --- a/image/fixtures/pre1.9/v1compatibility +++ /dev/null @@ -1 +0,0 @@ -{"id":"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9","parent":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","created":"2015-08-19T16:49:11.368300679Z","container":"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253","container_config":{"Hostname":"03797203757d","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Cmd":["/bin/sh","-c","#(nop) ENTRYPOINT 
[\"/go/bin/dnsdock\"]"],"Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Volumes":null,"WorkingDir":"/go","Entrypoint":["/go/bin/dnsdock"],"NetworkDisabled":false,"MacAddress":"","OnBuild":[],"Labels":{}},"docker_version":"1.6.2","config":{"Hostname":"03797203757d","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Cmd":null,"Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Volumes":null,"WorkingDir":"/go","Entrypoint":["/go/bin/dnsdock"],"NetworkDisabled":false,"MacAddress":"","OnBuild":[],"Labels":{}},"architecture":"amd64","os":"linux","Size":0} diff --git a/image/fs.go b/image/fs.go new file mode 100644 index 0000000000..7c1c4c22d6 --- /dev/null +++ b/image/fs.go @@ -0,0 +1,192 @@ +package image + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" +) + +// IDWalkFunc is function called by StoreBackend.Walk +type IDWalkFunc func(id ID) error + +// StoreBackend provides interface for image.Store persistence +type StoreBackend interface { + Walk(f IDWalkFunc) error + Get(id ID) ([]byte, error) + Set(data []byte) (ID, error) + Delete(id ID) error + SetMetadata(id ID, key string, data []byte) error + GetMetadata(id ID, key string) ([]byte, error) + DeleteMetadata(id ID, key string) error +} + +// fs implements StoreBackend using the filesystem. 
+type fs struct { + sync.RWMutex + root string +} + +const ( + contentDirName = "content" + metadataDirName = "metadata" +) + +// NewFSStoreBackend returns new filesystem based backend for image.Store +func NewFSStoreBackend(root string) (StoreBackend, error) { + return newFSStore(root) +} + +func newFSStore(root string) (*fs, error) { + s := &fs{ + root: root, + } + if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { + return nil, err + } + return s, nil +} + +func (s *fs) contentFile(id ID) string { + dgst := digest.Digest(id) + return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +func (s *fs) metadataDir(id ID) string { + dgst := digest.Digest(id) + return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +// Walk calls the supplied callback for each image ID in the storage backend. +func (s *fs) Walk(f IDWalkFunc) error { + // Only Canonical digest (sha256) is currently supported + s.RLock() + dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) + s.RUnlock() + if err != nil { + return err + } + for _, v := range dir { + dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Skipping invalid digest %s: %s", dgst, err) + continue + } + if err := f(ID(dgst)); err != nil { + return err + } + } + return nil +} + +// Get returns the content stored under a given ID. 
+func (s *fs) Get(id ID) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + return s.get(id) +} + +func (s *fs) get(id ID) ([]byte, error) { + content, err := ioutil.ReadFile(s.contentFile(id)) + if err != nil { + return nil, err + } + + // todo: maybe optional + validated, err := digest.FromBytes(content) + if err != nil { + return nil, err + } + if ID(validated) != id { + return nil, fmt.Errorf("failed to verify image: %v", id) + } + + return content, nil +} + +// Set stores content under a given ID. +func (s *fs) Set(data []byte) (ID, error) { + s.Lock() + defer s.Unlock() + + if len(data) == 0 { + return "", fmt.Errorf("Invalid empty data") + } + + dgst, err := digest.FromBytes(data) + if err != nil { + return "", err + } + id := ID(dgst) + filePath := s.contentFile(id) + tempFilePath := s.contentFile(id) + ".tmp" + if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil { + return "", err + } + if err := os.Rename(tempFilePath, filePath); err != nil { + return "", err + } + + return id, nil +} + +// Delete removes content and metadata files associated with the ID. +func (s *fs) Delete(id ID) error { + s.Lock() + defer s.Unlock() + + if err := os.RemoveAll(s.metadataDir(id)); err != nil { + return err + } + if err := os.Remove(s.contentFile(id)); err != nil { + return err + } + return nil +} + +// SetMetadata sets metadata for a given ID. It fails if there's no base file. +func (s *fs) SetMetadata(id ID, key string, data []byte) error { + s.Lock() + defer s.Unlock() + if _, err := s.get(id); err != nil { + return err + } + + baseDir := filepath.Join(s.metadataDir(id)) + if err := os.MkdirAll(baseDir, 0700); err != nil { + return err + } + filePath := filepath.Join(s.metadataDir(id), key) + tempFilePath := filePath + ".tmp" + if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil { + return err + } + return os.Rename(tempFilePath, filePath) +} + +// GetMetadata returns metadata for a given ID. 
+func (s *fs) GetMetadata(id ID, key string) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + if _, err := s.get(id); err != nil { + return nil, err + } + return ioutil.ReadFile(filepath.Join(s.metadataDir(id), key)) +} + +// DeleteMetadata removes the metadata associated with an ID. +func (s *fs) DeleteMetadata(id ID, key string) error { + s.Lock() + defer s.Unlock() + + return os.RemoveAll(filepath.Join(s.metadataDir(id), key)) +} diff --git a/image/fs_test.go b/image/fs_test.go new file mode 100644 index 0000000000..0790b78e45 --- /dev/null +++ b/image/fs_test.go @@ -0,0 +1,391 @@ +package image + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "errors" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/distribution/digest" +) + +func TestFSGetSet(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testGetSet(t, fs) +} + +func TestFSGetInvalidData(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + id, err := fs.Set([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + + dgst := digest.Digest(id) + + if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600); err != nil { + t.Fatal(err) + } + + _, err = fs.Get(id) + if err == nil { + t.Fatal("Expected get to fail after data modification.") + } +} + +func TestFSInvalidSet(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + id, err := digest.FromBytes([]byte("foobar")) + if err != nil { + t.Fatal(err) + } 
+ err = os.Mkdir(filepath.Join(tmpdir, contentDirName, string(id.Algorithm()), id.Hex()), 0700) + if err != nil { + t.Fatal(err) + } + + _, err = fs.Set([]byte("foobar")) + if err == nil { + t.Fatal("Expecting error from invalid filesystem data.") + } +} + +func TestFSInvalidRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + tcases := []struct { + root, invalidFile string + }{ + {"root", "root"}, + {"root", "root/content"}, + {"root", "root/metadata"}, + } + + for _, tc := range tcases { + root := filepath.Join(tmpdir, tc.root) + filePath := filepath.Join(tmpdir, tc.invalidFile) + err := os.MkdirAll(filepath.Dir(filePath), 0700) + if err != nil { + t.Fatal(err) + } + f, err := os.Create(filePath) + if err != nil { + t.Fatal(err) + } + f.Close() + + _, err = NewFSStoreBackend(root) + if err == nil { + t.Fatalf("Expected error from root %q and invlid file %q", tc.root, tc.invalidFile) + } + + os.RemoveAll(root) + } + +} + +func testMetadataGetSet(t *testing.T, store StoreBackend) { + id, err := store.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + id2, err := store.Set([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + tcases := []struct { + id ID + key string + value []byte + }{ + {id, "tkey", []byte("tval1")}, + {id, "tkey2", []byte("tval2")}, + {id2, "tkey", []byte("tval3")}, + } + + for _, tc := range tcases { + err = store.SetMetadata(tc.id, tc.key, tc.value) + if err != nil { + t.Fatal(err) + } + + actual, err := store.GetMetadata(tc.id, tc.key) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(actual, tc.value) != 0 { + t.Fatalf("Metadata expected %q, got %q", tc.value, actual) + } + } + + _, err = store.GetMetadata(id2, "tkey2") + if err == nil { + t.Fatal("Expected error for getting metadata for unknown key") + } + + id3, err := digest.FromBytes([]byte("baz")) + if err != nil { + t.Fatal(err) + } + + err = store.SetMetadata(ID(id3), "tkey", 
[]byte("tval")) + if err == nil { + t.Fatal("Expected error for setting metadata for unknown ID.") + } + + _, err = store.GetMetadata(ID(id3), "tkey") + if err == nil { + t.Fatal("Expected error for getting metadata for unknown ID.") + } +} + +func TestFSMetadataGetSet(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testMetadataGetSet(t, fs) +} + +func TestFSDelete(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testDelete(t, fs) +} + +func TestFSWalker(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testWalker(t, fs) +} + +func TestFSInvalidWalker(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + fooID, err := fs.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, "sha256/foobar"), []byte("foobar"), 0600); err != nil { + t.Fatal(err) + } + + n := 0 + err = fs.Walk(func(id ID) error { + if id != fooID { + t.Fatalf("Invalid walker ID %q, expected %q", id, fooID) + } + n++ + return nil + }) + if err != nil { + t.Fatalf("Invalid data should not have caused walker error, got %v", err) + } + if n != 1 { + t.Fatalf("Expected 1 walk initialization, got %d", n) + } +} + +func testGetSet(t *testing.T, store StoreBackend) { + type tcase struct { + input []byte + expected ID + } + tcases := []tcase{ + {[]byte("foobar"), 
ID("sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")}, + } + + randomInput := make([]byte, 8*1024) + _, err := rand.Read(randomInput) + if err != nil { + t.Fatal(err) + } + // skipping use of digest pkg because it's used by the implementation + h := sha256.New() + _, err = h.Write(randomInput) + if err != nil { + t.Fatal(err) + } + tcases = append(tcases, tcase{ + input: randomInput, + expected: ID("sha256:" + hex.EncodeToString(h.Sum(nil))), + }) + + for _, tc := range tcases { + id, err := store.Set([]byte(tc.input)) + if err != nil { + t.Fatal(err) + } + if id != tc.expected { + t.Fatalf("Expected ID %q, got %q", tc.expected, id) + } + } + + for _, emptyData := range [][]byte{nil, {}} { + _, err := store.Set(emptyData) + if err == nil { + t.Fatal("Expected error for nil input.") + } + } + + for _, tc := range tcases { + data, err := store.Get(tc.expected) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(data, tc.input) != 0 { + t.Fatalf("Expected data %q, got %q", tc.input, data) + } + } + + for _, key := range []ID{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} { + _, err := store.Get(key) + if err == nil { + t.Fatalf("Expected error for ID %q.", key) + } + } + +} + +func testDelete(t *testing.T, store StoreBackend) { + id, err := store.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + id2, err := store.Set([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + err = store.Delete(id) + if err != nil { + t.Fatal(err) + } + + _, err = store.Get(id) + if err == nil { + t.Fatalf("Expected getting deleted item %q to fail", id) + } + _, err = store.Get(id2) + if err != nil { + t.Fatal(err) + } + + err = store.Delete(id2) + if err != nil { + t.Fatal(err) + } + _, err = store.Get(id2) + if err == nil { + t.Fatalf("Expected getting deleted item %q to fail", id2) + } +} + +func testWalker(t *testing.T, store StoreBackend) { + id, err := store.Set([]byte("foo")) + if err != nil 
{ + t.Fatal(err) + } + id2, err := store.Set([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + tcases := make(map[ID]struct{}) + tcases[id] = struct{}{} + tcases[id2] = struct{}{} + n := 0 + err = store.Walk(func(id ID) error { + delete(tcases, id) + n++ + return nil + }) + if err != nil { + t.Fatal(err) + } + + if n != 2 { + t.Fatalf("Expected 2 walk initializations, got %d", n) + } + if len(tcases) != 0 { + t.Fatalf("Expected empty unwalked set, got %+v", tcases) + } + + // stop on error + tcases = make(map[ID]struct{}) + tcases[id] = struct{}{} + err = store.Walk(func(id ID) error { + return errors.New("") + }) + if err == nil { + t.Fatalf("Exected error from walker.") + } +} diff --git a/image/image.go b/image/image.go index 89799160da..ed44ef377f 100644 --- a/image/image.go +++ b/image/image.go @@ -2,36 +2,23 @@ package image import ( "encoding/json" - "fmt" - "regexp" + "errors" + "io" "time" - "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" - derr "github.com/docker/docker/errors" - "github.com/docker/docker/pkg/version" "github.com/docker/docker/runconfig" ) -var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) +// ID is the content-addressable ID of an image. +type ID digest.Digest -// noFallbackMinVersion is the minimum version for which v1compatibility -// information will not be marshaled through the Image struct to remove -// blank fields. -var noFallbackMinVersion = version.Version("1.8.3") - -// Descriptor provides the information necessary to register an image in -// the graph. -type Descriptor interface { - ID() string - Parent() string - MarshalConfig() ([]byte, error) +func (id ID) String() string { + return digest.Digest(id).String() } -// Image stores the image configuration. -// All fields in this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Image struct { +// V1Image stores the V1 image configuration. 
+type V1Image struct { // ID a unique 64 character identifier of the image ID string `json:"id,omitempty"` // Parent id of the image @@ -55,95 +42,87 @@ type Image struct { // OS is the operating system used to build and run the image OS string `json:"os,omitempty"` // Size is the total size of the image including all layers it is composed of - Size int64 `json:",omitempty"` // capitalized for backwards compatibility - // ParentID specifies the strong, content address of the parent configuration. - ParentID digest.Digest `json:"parent_id,omitempty"` - // LayerID provides the content address of the associated layer. - LayerID digest.Digest `json:"layer_id,omitempty"` + Size int64 `json:",omitempty"` } -// NewImgJSON creates an Image configuration from json. -func NewImgJSON(src []byte) (*Image, error) { - ret := &Image{} +// Image stores the image configuration +type Image struct { + V1Image + Parent ID `json:"parent,omitempty"` + RootFS *RootFS `json:"rootfs,omitempty"` + History []History `json:"history,omitempty"` - // FIXME: Is there a cleaner way to "purify" the input json? - if err := json.Unmarshal(src, ret); err != nil { - return nil, err - } - return ret, nil + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + computedID ID } -// ValidateID checks whether an ID string is a valid image ID. -func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return derr.ErrorCodeInvalidImageID.WithArgs(id) - } - return nil +// RawJSON returns the immutable JSON associated with the image. +func (img *Image) RawJSON() []byte { + return img.rawJSON } -// MakeImageConfig returns immutable configuration JSON for image based on the -// v1Compatibility object, layer digest and parent StrongID. SHA256() of this -// config is the new image ID (strongID). 
-func MakeImageConfig(v1Compatibility []byte, layerID, parentID digest.Digest) ([]byte, error) { +// ID returns the image's content-addressable ID. +func (img *Image) ID() ID { + return img.computedID +} - // Detect images created after 1.8.3 - img, err := NewImgJSON(v1Compatibility) +// MarshalJSON serializes the image to JSON. It sorts the top-level keys so +// that JSON that's been manipulated by a push/pull cycle with a legacy +// registry won't end up with a different key order. +func (img *Image) MarshalJSON() ([]byte, error) { + type MarshalImage Image + + pass1, err := json.Marshal(MarshalImage(*img)) if err != nil { return nil, err } - useFallback := version.Version(img.DockerVersion).LessThan(noFallbackMinVersion) - - if useFallback { - // Fallback for pre-1.8.3. Calculate base config based on Image struct - // so that fields with default values added by Docker will use same ID - logrus.Debugf("Using fallback hash for %v", layerID) - - v1Compatibility, err = json.Marshal(img) - if err != nil { - return nil, err - } - } var c map[string]*json.RawMessage - if err := json.Unmarshal(v1Compatibility, &c); err != nil { + if err := json.Unmarshal(pass1, &c); err != nil { return nil, err } - - if err := layerID.Validate(); err != nil { - return nil, fmt.Errorf("invalid layerID: %v", err) - } - - c["layer_id"] = rawJSON(layerID) - - if parentID != "" { - if err := parentID.Validate(); err != nil { - return nil, fmt.Errorf("invalid parentID %v", err) - } - c["parent_id"] = rawJSON(parentID) - } - - delete(c, "id") - delete(c, "parent") - delete(c, "Size") // Size is calculated from data on disk and is inconsitent - return json.Marshal(c) } -// StrongID returns image ID for the config JSON. 
-func StrongID(configJSON []byte) (digest.Digest, error) { - digester := digest.Canonical.New() - if _, err := digester.Hash().Write(configJSON); err != nil { - return "", err - } - dgst := digester.Digest() - logrus.Debugf("H(%v) = %v", string(configJSON), dgst) - return dgst, nil +// History stores build commands that were used to create an image +type History struct { + // Created timestamp for build point + Created time.Time `json:"created"` + // Author of the build point + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building image. + CreatedBy string `json:"created_by,omitempty"` + // Comment is custom message set by the user when creating the image. + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` } -func rawJSON(value interface{}) *json.RawMessage { - jsonval, err := json.Marshal(value) - if err != nil { - return nil - } - return (*json.RawMessage)(&jsonval) +// Exporter provides interface for exporting and importing images +type Exporter interface { + Load(io.ReadCloser, io.Writer) error + // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error + Save([]string, io.Writer) error +} + +// NewFromJSON creates an Image configuration from json. 
+func NewFromJSON(src []byte) (*Image, error) { + img := &Image{} + + if err := json.Unmarshal(src, img); err != nil { + return nil, err + } + if img.RootFS == nil { + return nil, errors.New("Invalid image JSON, no RootFS key.") + } + + img.rawJSON = src + + return img, nil } diff --git a/image/image_test.go b/image/image_test.go index 77d92c4490..525023b813 100644 --- a/image/image_test.go +++ b/image/image_test.go @@ -1,55 +1,59 @@ package image import ( - "bytes" - "io/ioutil" + "encoding/json" + "sort" + "strings" "testing" - - "github.com/docker/distribution/digest" ) -var fixtures = []string{ - "fixtures/pre1.9", - "fixtures/post1.9", -} +const sampleImageJSON = `{ + "architecture": "amd64", + "os": "linux", + "config": {}, + "rootfs": { + "type": "layers", + "diff_ids": [] + } +}` -func loadFixtureFile(t *testing.T, path string) []byte { - fileData, err := ioutil.ReadFile(path) +func TestJSON(t *testing.T) { + img, err := NewFromJSON([]byte(sampleImageJSON)) if err != nil { - t.Fatalf("error opening %s: %v", path, err) + t.Fatal(err) } - - return bytes.TrimSpace(fileData) -} - -// TestMakeImageConfig makes sure that MakeImageConfig returns the expected -// canonical JSON for a reference Image. 
-func TestMakeImageConfig(t *testing.T) { - for _, fixture := range fixtures { - v1Compatibility := loadFixtureFile(t, fixture+"/v1compatibility") - expectedConfig := loadFixtureFile(t, fixture+"/expected_config") - layerID := digest.Digest(loadFixtureFile(t, fixture+"/layer_id")) - parentID := digest.Digest(loadFixtureFile(t, fixture+"/parent_id")) - - json, err := MakeImageConfig(v1Compatibility, layerID, parentID) - if err != nil { - t.Fatalf("MakeImageConfig on %s returned error: %v", fixture, err) - } - if !bytes.Equal(json, expectedConfig) { - t.Fatalf("did not get expected JSON for %s\nexpected: %s\ngot: %s", fixture, expectedConfig, json) - } + rawJSON := img.RawJSON() + if string(rawJSON) != sampleImageJSON { + t.Fatalf("Raw JSON of config didn't match: expected %+v, got %v", sampleImageJSON, rawJSON) } } -// TestGetStrongID makes sure that GetConfigJSON returns the expected -// hash for a reference Image. -func TestGetStrongID(t *testing.T) { - for _, fixture := range fixtures { - expectedConfig := loadFixtureFile(t, fixture+"/expected_config") - expectedComputedID := digest.Digest(loadFixtureFile(t, fixture+"/expected_computed_id")) - - if id, err := StrongID(expectedConfig); err != nil || id != expectedComputedID { - t.Fatalf("did not get expected ID for %s\nexpected: %s\ngot: %s\nerror: %v", fixture, expectedComputedID, id, err) - } +func TestInvalidJSON(t *testing.T) { + _, err := NewFromJSON([]byte("{}")) + if err == nil { + t.Fatal("Expected JSON parse error") + } +} + +func TestMarshalKeyOrder(t *testing.T) { + b, err := json.Marshal(&Image{ + V1Image: V1Image{ + Comment: "a", + Author: "b", + Architecture: "c", + }, + }) + if err != nil { + t.Fatal(err) + } + + expectedOrder := []string{"architecture", "author", "comment"} + var indexes []int + for _, k := range expectedOrder { + indexes = append(indexes, strings.Index(string(b), k)) + } + + if !sort.IntsAreSorted(indexes) { + t.Fatal("invalid key order in JSON: ", string(b)) } } diff --git 
a/image/rootfs.go b/image/rootfs.go new file mode 100644 index 0000000000..b546696d6a --- /dev/null +++ b/image/rootfs.go @@ -0,0 +1,8 @@ +package image + +import "github.com/docker/docker/layer" + +// Append appends a new diffID to rootfs +func (r *RootFS) Append(id layer.DiffID) { + r.DiffIDs = append(r.DiffIDs, id) +} diff --git a/image/rootfs_unix.go b/image/rootfs_unix.go new file mode 100644 index 0000000000..e817db5184 --- /dev/null +++ b/image/rootfs_unix.go @@ -0,0 +1,23 @@ +// +build !windows + +package image + +import "github.com/docker/docker/layer" + +// RootFS describes images root filesystem +// This is currently a placeholder that only supports layers. In the future +// this can be made into a interface that supports different implementaions. +type RootFS struct { + Type string `json:"type"` + DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` +} + +// ChainID returns the ChainID for the top layer in RootFS. +func (r *RootFS) ChainID() layer.ChainID { + return layer.CreateChainID(r.DiffIDs) +} + +// NewRootFS returns empty RootFS struct +func NewRootFS() *RootFS { + return &RootFS{Type: "layers"} +} diff --git a/image/rootfs_windows.go b/image/rootfs_windows.go new file mode 100644 index 0000000000..6a2b179d45 --- /dev/null +++ b/image/rootfs_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package image + +import ( + "crypto/sha512" + "fmt" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +// RootFS describes images root filesystem +// This is currently a placeholder that only supports layers. In the future +// this can be made into a interface that supports different implementaions. +type RootFS struct { + Type string `json:"type"` + DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` + BaseLayer string `json:"base_layer,omitempty"` +} + +// BaseLayerID returns the 64 byte hex ID for the baselayer name. 
+func (r *RootFS) BaseLayerID() string { + baseID := sha512.Sum384([]byte(r.BaseLayer)) + return fmt.Sprintf("%x", baseID[:32]) +} + +// ChainID returns the ChainID for the top layer in RootFS. +func (r *RootFS) ChainID() layer.ChainID { + baseDiffID, _ := digest.FromBytes([]byte(r.BaseLayerID())) // can never error + return layer.CreateChainID(append([]layer.DiffID{layer.DiffID(baseDiffID)}, r.DiffIDs...)) +} + +// NewRootFS returns empty RootFS struct +func NewRootFS() *RootFS { + return &RootFS{Type: "layers+base"} +} diff --git a/image/store.go b/image/store.go new file mode 100644 index 0000000000..a9c02d6ed3 --- /dev/null +++ b/image/store.go @@ -0,0 +1,286 @@ +package image + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +// Store is an interface for creating and accessing images +type Store interface { + Create(config []byte) (ID, error) + Get(id ID) (*Image, error) + Delete(id ID) ([]layer.Metadata, error) + Search(partialID string) (ID, error) + SetParent(id ID, parent ID) error + GetParent(id ID) (ID, error) + Children(id ID) []ID + Map() map[ID]*Image + Heads() map[ID]*Image +} + +// LayerGetReleaser is a minimal interface for getting and releasing images. 
+type LayerGetReleaser interface { + Get(layer.ChainID) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type imageMeta struct { + layer layer.Layer + children map[ID]struct{} +} + +type store struct { + sync.Mutex + ls LayerGetReleaser + images map[ID]*imageMeta + fs StoreBackend + digestSet *digest.Set +} + +// NewImageStore returns new store object for given layer store +func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) { + is := &store{ + ls: ls, + images: make(map[ID]*imageMeta), + fs: fs, + digestSet: digest.NewSet(), + } + + // load all current images and retain layers + if err := is.restore(); err != nil { + return nil, err + } + + return is, nil +} + +func (is *store) restore() error { + err := is.fs.Walk(func(id ID) error { + img, err := is.Get(id) + if err != nil { + logrus.Errorf("invalid image %v, %v", id, err) + return nil + } + var l layer.Layer + if chainID := img.RootFS.ChainID(); chainID != "" { + l, err = is.ls.Get(chainID) + if err != nil { + return err + } + } + if err := is.digestSet.Add(digest.Digest(id)); err != nil { + return err + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[ID(id)] = imageMeta + + return nil + }) + if err != nil { + return err + } + + // Second pass to fill in children maps + for id := range is.images { + if parent, err := is.GetParent(id); err == nil { + if parentMeta := is.images[parent]; parentMeta != nil { + parentMeta.children[id] = struct{}{} + } + } + } + + return nil +} + +func (is *store) Create(config []byte) (ID, error) { + var img Image + err := json.Unmarshal(config, &img) + if err != nil { + return "", err + } + + // Must reject any config that references diffIDs from the history + // which aren't among the rootfs layers. 
+ rootFSLayers := make(map[layer.DiffID]struct{}) + for _, diffID := range img.RootFS.DiffIDs { + rootFSLayers[diffID] = struct{}{} + } + + layerCounter := 0 + for _, h := range img.History { + if !h.EmptyLayer { + layerCounter++ + } + } + if layerCounter > len(img.RootFS.DiffIDs) { + return "", errors.New("too many non-empty layers in History section") + } + + dgst, err := is.fs.Set(config) + if err != nil { + return "", err + } + imageID := ID(dgst) + + is.Lock() + defer is.Unlock() + + if _, exists := is.images[imageID]; exists { + return imageID, nil + } + + layerID := img.RootFS.ChainID() + + var l layer.Layer + if layerID != "" { + l, err = is.ls.Get(layerID) + if err != nil { + return "", err + } + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[imageID] = imageMeta + if err := is.digestSet.Add(digest.Digest(imageID)); err != nil { + delete(is.images, imageID) + return "", err + } + + return imageID, nil +} + +func (is *store) Search(term string) (ID, error) { + is.Lock() + defer is.Unlock() + + dgst, err := is.digestSet.Lookup(term) + if err != nil { + return "", err + } + return ID(dgst), nil +} + +func (is *store) Get(id ID) (*Image, error) { + // todo: Check if image is in images + // todo: Detect manual insertions and start using them + config, err := is.fs.Get(id) + if err != nil { + return nil, err + } + + img, err := NewFromJSON(config) + if err != nil { + return nil, err + } + img.computedID = id + + img.Parent, err = is.GetParent(id) + if err != nil { + img.Parent = "" + } + + return img, nil +} + +func (is *store) Delete(id ID) ([]layer.Metadata, error) { + is.Lock() + defer is.Unlock() + + imageMeta := is.images[id] + if imageMeta == nil { + return nil, fmt.Errorf("unrecognized image ID %s", id.String()) + } + for id := range imageMeta.children { + is.fs.DeleteMetadata(id, "parent") + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + 
delete(is.images[parent].children, id) + } + + delete(is.images, id) + is.fs.Delete(id) + + if imageMeta.layer != nil { + return is.ls.Release(imageMeta.layer) + } + return nil, nil +} + +func (is *store) SetParent(id, parent ID) error { + is.Lock() + defer is.Unlock() + parentMeta := is.images[parent] + if parentMeta == nil { + return fmt.Errorf("unknown parent image ID %s", parent.String()) + } + parentMeta.children[id] = struct{}{} + return is.fs.SetMetadata(id, "parent", []byte(parent)) +} + +func (is *store) GetParent(id ID) (ID, error) { + d, err := is.fs.GetMetadata(id, "parent") + if err != nil { + return "", err + } + return ID(d), nil // todo: validate? +} + +func (is *store) Children(id ID) []ID { + is.Lock() + defer is.Unlock() + + return is.children(id) +} + +func (is *store) children(id ID) []ID { + var ids []ID + if is.images[id] != nil { + for id := range is.images[id].children { + ids = append(ids, id) + } + } + return ids +} + +func (is *store) Heads() map[ID]*Image { + return is.imagesMap(false) +} + +func (is *store) Map() map[ID]*Image { + return is.imagesMap(true) +} + +func (is *store) imagesMap(all bool) map[ID]*Image { + is.Lock() + defer is.Unlock() + + images := make(map[ID]*Image) + + for id := range is.images { + if !all && len(is.children(id)) > 0 { + continue + } + img, err := is.Get(id) + if err != nil { + logrus.Errorf("invalid image access: %q, error: %q", id, err) + continue + } + images[id] = img + } + return images +} diff --git a/image/store_test.go b/image/store_test.go new file mode 100644 index 0000000000..756b3922d6 --- /dev/null +++ b/image/store_test.go @@ -0,0 +1,205 @@ +package image + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +func TestRestore(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + 
t.Fatal(err) + } + + id1, err := fs.Set([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + _, err = fs.Set([]byte(`invalid`)) + if err != nil { + t.Fatal(err) + } + id2, err := fs.Set([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + if err != nil { + t.Fatal(err) + } + err = fs.SetMetadata(id2, "parent", []byte(id1)) + if err != nil { + t.Fatal(err) + } + + is, err := NewImageStore(fs, &mockLayerGetReleaser{}) + if err != nil { + t.Fatal(err) + } + + imgs := is.Map() + if actual, expected := len(imgs), 2; actual != expected { + t.Fatalf("invalid images length, expected 2, got %q", len(imgs)) + } + + img1, err := is.Get(ID(id1)) + if err != nil { + t.Fatal(err) + } + + if actual, expected := img1.computedID, ID(id1); actual != expected { + t.Fatalf("invalid image ID: expected %q, got %q", expected, actual) + } + + if actual, expected := img1.computedID.String(), string(id1); actual != expected { + t.Fatalf("invalid image ID string: expected %q, got %q", expected, actual) + } + + img2, err := is.Get(ID(id2)) + if err != nil { + t.Fatal(err) + } + + if actual, expected := img1.Comment, "abc"; actual != expected { + t.Fatalf("invalid comment for image1: expected %q, got %q", expected, actual) + } + + if actual, expected := img2.Comment, "def"; actual != expected { + t.Fatalf("invalid comment for image2: expected %q, got %q", expected, actual) + } + + p, err := is.GetParent(ID(id1)) + if err == nil { + t.Fatal("expected error for getting parent") + } + + p, err = is.GetParent(ID(id2)) + if err != nil { + t.Fatal(err) + } + if actual, expected := p, ID(id1); actual != expected { + t.Fatalf("invalid parent: expected %q, got %q", expected, actual) + } + + children := is.Children(ID(id1)) + if len(children) != 1 { + t.Fatalf("invalid children length: %q", len(children)) + } + if actual, expected := children[0], ID(id2); actual != 
expected { + t.Fatalf("invalid child for id1: expected %q, got %q", expected, actual) + } + + heads := is.Heads() + if actual, expected := len(heads), 1; actual != expected { + t.Fatalf("invalid images length: expected %q, got %q", expected, actual) + } + + sid1, err := is.Search(string(id1)[:10]) + if err != nil { + t.Fatal(err) + } + if actual, expected := sid1, ID(id1); actual != expected { + t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) + } + + sid1, err = is.Search(digest.Digest(id1).Hex()[:6]) + if err != nil { + t.Fatal(err) + } + if actual, expected := sid1, ID(id1); actual != expected { + t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) + } + + invalidPattern := digest.Digest(id1).Hex()[1:6] + _, err = is.Search(invalidPattern) + if err == nil { + t.Fatalf("expected search for %q to fail", invalidPattern) + } + +} + +func TestAddDelete(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + is, err := NewImageStore(fs, &mockLayerGetReleaser{}) + if err != nil { + t.Fatal(err) + } + + id1, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + if err != nil { + t.Fatal(err) + } + + if actual, expected := id1, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"); actual != expected { + t.Fatalf("create ID mismatch: expected %q, got %q", expected, actual) + } + + img, err := is.Get(id1) + if err != nil { + t.Fatal(err) + } + + if actual, expected := img.Comment, "abc"; actual != expected { + t.Fatalf("invalid comment in image: expected %q, got %q", expected, actual) + } + + id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": 
["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + if err != nil { + t.Fatal(err) + } + + err = is.SetParent(id2, id1) + if err != nil { + t.Fatal(err) + } + + pid1, err := is.GetParent(id2) + if err != nil { + t.Fatal(err) + } + if actual, expected := pid1, id1; actual != expected { + t.Fatalf("invalid parent for image: expected %q, got %q", expected, actual) + } + + _, err = is.Delete(id1) + if err != nil { + t.Fatal(err) + } + _, err = is.Get(id1) + if err == nil { + t.Fatalf("expected get for deleted image %q to fail", id1) + } + _, err = is.Get(id2) + if err != nil { + t.Fatal(err) + } + pid1, err = is.GetParent(id2) + if err == nil { + t.Fatalf("expected parent check for image %q to fail, got %q", id2, pid1) + } + +} + +type mockLayerGetReleaser struct{} + +func (ls *mockLayerGetReleaser) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +func (ls *mockLayerGetReleaser) Release(layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} diff --git a/image/tarexport/load.go b/image/tarexport/load.go new file mode 100644 index 0000000000..5d5507e0f3 --- /dev/null +++ b/image/tarexport/load.go @@ -0,0 +1,284 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/symlink" +) + +func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer) error { + tmpDir, err := ioutil.TempDir("", "docker-import-") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil { + return err + } + // read manifest, if no file then load in legacy mode + manifestPath, err := safePath(tmpDir, manifestFileName) + if 
err != nil { + return err + } + manifestFile, err := os.Open(manifestPath) + if err != nil { + if os.IsNotExist(err) { + return l.legacyLoad(tmpDir, outStream) + } + return manifestFile.Close() + } + defer manifestFile.Close() + + var manifest []manifestItem + if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil { + return err + } + + for _, m := range manifest { + configPath, err := safePath(tmpDir, m.Config) + if err != nil { + return err + } + config, err := ioutil.ReadFile(configPath) + if err != nil { + return err + } + img, err := image.NewFromJSON(config) + if err != nil { + return err + } + var rootFS image.RootFS + rootFS = *img.RootFS + rootFS.DiffIDs = nil + + if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { + return fmt.Errorf("invalid manifest, layers length mismatch: expected %q, got %q", expected, actual) + } + + for i, diffID := range img.RootFS.DiffIDs { + layerPath, err := safePath(tmpDir, m.Layers[i]) + if err != nil { + return err + } + newLayer, err := l.loadLayer(layerPath, rootFS) + if err != nil { + return err + } + defer layer.ReleaseAndLog(l.ls, newLayer) + if expected, actual := diffID, newLayer.DiffID(); expected != actual { + return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) + } + rootFS.Append(diffID) + } + + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + for _, repoTag := range m.RepoTags { + named, err := reference.ParseNamed(repoTag) + if err != nil { + return err + } + ref, ok := named.(reference.NamedTagged) + if !ok { + return fmt.Errorf("invalid tag %q", repoTag) + } + l.setLoadedTag(ref, imgID, outStream) + } + + } + + return nil +} + +func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS) (layer.Layer, error) { + rawTar, err := os.Open(filename) + if err != nil { + logrus.Debugf("Error reading embedded tar: %v", err) + return nil, err + } + inflatedLayerData, err := 
archive.DecompressStream(rawTar) + if err != nil { + return nil, err + } + + defer rawTar.Close() + defer inflatedLayerData.Close() + + return l.ls.Register(inflatedLayerData, rootFS.ChainID()) +} + +func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID image.ID, outStream io.Writer) error { + if prevID, err := l.ts.Get(ref); err == nil && prevID != imgID { + fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags + } + + if err := l.ts.Add(ref, imgID, true); err != nil { + return err + } + return nil +} + +func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer) error { + legacyLoadedMap := make(map[string]image.ID) + + dirs, err := ioutil.ReadDir(tmpDir) + if err != nil { + return err + } + + // every dir represents an image + for _, d := range dirs { + if d.IsDir() { + if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap); err != nil { + return err + } + } + } + + // load tags from repositories file + repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName) + if err != nil { + return err + } + repositoriesFile, err := os.Open(repositoriesPath) + if err != nil { + if !os.IsNotExist(err) { + return err + } + return repositoriesFile.Close() + } + defer repositoriesFile.Close() + + repositories := make(map[string]map[string]string) + if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil { + return err + } + + for name, tagMap := range repositories { + for tag, oldID := range tagMap { + imgID, ok := legacyLoadedMap[oldID] + if !ok { + return fmt.Errorf("invalid target ID: %v", oldID) + } + named, err := reference.WithName(name) + if err != nil { + return err + } + ref, err := reference.WithTag(named, tag) + if err != nil { + return err + } + l.setLoadedTag(ref, imgID, outStream) + } + } + + return nil +} + +func (l *tarexporter) legacyLoadImage(oldID, sourceDir 
string, loadedMap map[string]image.ID) error { + if _, loaded := loadedMap[oldID]; loaded { + return nil + } + configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName)) + if err != nil { + return err + } + imageJSON, err := ioutil.ReadFile(configPath) + if err != nil { + logrus.Debugf("Error reading json: %v", err) + return err + } + + var img struct{ Parent string } + if err := json.Unmarshal(imageJSON, &img); err != nil { + return err + } + + var parentID image.ID + if img.Parent != "" { + for { + var loaded bool + if parentID, loaded = loadedMap[img.Parent]; !loaded { + if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap); err != nil { + return err + } + } else { + break + } + } + } + + // todo: try to connect with migrate code + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := l.is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName)) + if err != nil { + return err + } + newLayer, err := l.loadLayer(layerPath, *rootFS) + if err != nil { + return err + } + rootFS.Append(newLayer.DiffID()) + + h, err := v1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + metadata, err := l.ls.Release(newLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + return err + } + + if parentID != "" { + if err := l.is.SetParent(imgID, parentID); err != nil { + return err + } + } + + loadedMap[oldID] = imgID + return nil +} + +func safePath(base, path string) (string, error) { + return symlink.FollowSymlinkInScope(filepath.Join(base, path), base) +} diff --git a/image/tarexport/save.go b/image/tarexport/save.go new file 
mode 100644 index 0000000000..1019e29244 --- /dev/null +++ b/image/tarexport/save.go @@ -0,0 +1,303 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/registry" + "github.com/docker/docker/tag" +) + +type imageDescriptor struct { + refs []reference.NamedTagged + layers []string +} + +type saveSession struct { + *tarexporter + outDir string + images map[image.ID]*imageDescriptor + savedLayers map[string]struct{} +} + +func (l *tarexporter) Save(names []string, outStream io.Writer) error { + images, err := l.parseNames(names) + if err != nil { + return err + } + + return (&saveSession{tarexporter: l, images: images}).save(outStream) +} + +func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) { + imgDescr := make(map[image.ID]*imageDescriptor) + + addAssoc := func(id image.ID, ref reference.Named) { + if _, ok := imgDescr[id]; !ok { + imgDescr[id] = &imageDescriptor{} + } + + if ref != nil { + var tagged reference.NamedTagged + if _, ok := ref.(reference.Digested); ok { + return + } + var ok bool + if tagged, ok = ref.(reference.NamedTagged); !ok { + var err error + if tagged, err = reference.WithTag(ref, tag.DefaultTag); err != nil { + return + } + } + + for _, t := range imgDescr[id].refs { + if tagged.String() == t.String() { + return + } + } + imgDescr[id].refs = append(imgDescr[id].refs, tagged) + } + } + + for _, name := range names { + ref, err := reference.ParseNamed(name) + if err != nil { + return nil, err + } + ref = registry.NormalizeLocalReference(ref) + if ref.Name() == string(digest.Canonical) { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + 
continue + } + if _, ok := ref.(reference.Digested); !ok { + if _, ok := ref.(reference.NamedTagged); !ok { + assocs := l.ts.ReferencesByName(ref) + for _, assoc := range assocs { + addAssoc(assoc.ImageID, assoc.Ref) + } + if len(assocs) == 0 { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + } + continue + } + } + var imgID image.ID + if imgID, err = l.ts.Get(ref); err != nil { + return nil, err + } + addAssoc(imgID, ref) + + } + return imgDescr, nil +} + +func (s *saveSession) save(outStream io.Writer) error { + s.savedLayers = make(map[string]struct{}) + + // get image json + tempDir, err := ioutil.TempDir("", "docker-export-") + if err != nil { + return err + } + defer os.RemoveAll(tempDir) + + s.outDir = tempDir + reposLegacy := make(map[string]map[string]string) + + var manifest []manifestItem + + for id, imageDescr := range s.images { + if err = s.saveImage(id); err != nil { + return err + } + + var repoTags []string + var layers []string + + for _, ref := range imageDescr.refs { + if _, ok := reposLegacy[ref.Name()]; !ok { + reposLegacy[ref.Name()] = make(map[string]string) + } + reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] + repoTags = append(repoTags, ref.String()) + } + + for _, l := range imageDescr.layers { + layers = append(layers, filepath.Join(l, legacyLayerFileName)) + } + + manifest = append(manifest, manifestItem{ + Config: digest.Digest(id).Hex() + ".json", + RepoTags: repoTags, + Layers: layers, + }) + } + + if len(reposLegacy) > 0 { + reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) + f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + f.Close() + return err + } + if err := json.NewEncoder(f).Encode(reposLegacy); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := os.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + } + + 
manifestFileName := filepath.Join(tempDir, manifestFileName) + f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + f.Close() + return err + } + if err := json.NewEncoder(f).Encode(manifest); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := os.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + + fs, err := archive.Tar(tempDir, archive.Uncompressed) + if err != nil { + return err + } + defer fs.Close() + + if _, err := io.Copy(outStream, fs); err != nil { + return err + } + return nil +} + +func (s *saveSession) saveImage(id image.ID) error { + img, err := s.is.Get(id) + if err != nil { + return err + } + + if len(img.RootFS.DiffIDs) == 0 { + return fmt.Errorf("empty export - not implemented") + } + + var parent digest.Digest + var layers []string + for i := range img.RootFS.DiffIDs { + v1Img := image.V1Image{} + if i == len(img.RootFS.DiffIDs)-1 { + v1Img = img.V1Image + } + rootFS := *img.RootFS + rootFS.DiffIDs = rootFS.DiffIDs[:i+1] + v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) + if err != nil { + return err + } + + v1Img.ID = v1ID.Hex() + if parent != "" { + v1Img.Parent = parent.Hex() + } + + if err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created); err != nil { + return err + } + layers = append(layers, v1Img.ID) + parent = v1ID + } + + configFile := filepath.Join(s.outDir, digest.Digest(id).Hex()+".json") + if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { + return err + } + if err := os.Chtimes(configFile, img.Created, img.Created); err != nil { + return err + } + + s.images[id].layers = layers + return nil +} + +func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) error { + if _, exists := s.savedLayers[legacyImg.ID]; exists { + return nil + } + + outDir := filepath.Join(s.outDir, legacyImg.ID) + if err := os.Mkdir(outDir, 0755); err != 
nil { + return err + } + + // todo: why is this version file here? + if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil { + return err + } + + imageConfig, err := json.Marshal(legacyImg) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil { + return err + } + + // serialize filesystem + tarFile, err := os.Create(filepath.Join(outDir, legacyLayerFileName)) + if err != nil { + return err + } + defer tarFile.Close() + + l, err := s.ls.Get(id) + if err != nil { + return err + } + defer layer.ReleaseAndLog(s.ls, l) + + arch, err := l.TarStream() + if err != nil { + return err + } + if _, err := io.Copy(tarFile, arch); err != nil { + return err + } + + for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} { + // todo: maybe save layer created timestamp? + if err := os.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil { + return err + } + } + + s.savedLayers[legacyImg.ID] = struct{}{} + return nil +} diff --git a/image/tarexport/tarexport.go b/image/tarexport/tarexport.go new file mode 100644 index 0000000000..3369d665c9 --- /dev/null +++ b/image/tarexport/tarexport.go @@ -0,0 +1,36 @@ +package tarexport + +import ( + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/tag" +) + +const ( + manifestFileName = "manifest.json" + legacyLayerFileName = "layer.tar" + legacyConfigFileName = "json" + legacyVersionFileName = "VERSION" + legacyRepositoriesFileName = "repositories" +) + +type manifestItem struct { + Config string + RepoTags []string + Layers []string +} + +type tarexporter struct { + is image.Store + ls layer.Store + ts tag.Store +} + +// NewTarExporter returns new ImageExporter for tar packages +func NewTarExporter(is image.Store, ls layer.Store, ts tag.Store) image.Exporter { + return &tarexporter{ + is: 
is, + ls: ls, + ts: ts, + } +} diff --git a/image/v1/imagev1.go b/image/v1/imagev1.go new file mode 100644 index 0000000000..4a67c017b3 --- /dev/null +++ b/image/v1/imagev1.go @@ -0,0 +1,148 @@ +package v1 + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/version" +) + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +// noFallbackMinVersion is the minimum version for which v1compatibility +// information will not be marshaled through the Image struct to remove +// blank fields. +var noFallbackMinVersion = version.Version("1.8.3") + +// HistoryFromConfig creates a History struct from v1 configuration JSON +func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { + h := image.History{} + var v1Image image.V1Image + if err := json.Unmarshal(imageJSON, &v1Image); err != nil { + return h, err + } + + return image.History{ + Author: v1Image.Author, + Created: v1Image.Created, + CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd.Slice(), " "), + Comment: v1Image.Comment, + EmptyLayer: emptyLayer, + }, nil +} + +// CreateID creates an ID from v1 image, layerID and parent ID. +// Used for backwards compatibility with old clients. 
+func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { + v1Image.ID = "" + v1JSON, err := json.Marshal(v1Image) + if err != nil { + return "", err + } + + var config map[string]*json.RawMessage + if err := json.Unmarshal(v1JSON, &config); err != nil { + return "", err + } + + // FIXME: note that this is slightly incompatible with RootFS logic + config["layer_id"] = rawJSON(layerID) + if parent != "" { + config["parent"] = rawJSON(parent) + } + + configJSON, err := json.Marshal(config) + if err != nil { + return "", err + } + logrus.Debugf("CreateV1ID %s", configJSON) + + return digest.FromBytes(configJSON) +} + +// MakeConfigFromV1Config creates an image config from the legacy V1 config format. +func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { + var dver struct { + DockerVersion string `json:"docker_version"` + } + + if err := json.Unmarshal(imageJSON, &dver); err != nil { + return nil, err + } + + useFallback := version.Version(dver.DockerVersion).LessThan(noFallbackMinVersion) + + if useFallback { + var v1Image image.V1Image + err := json.Unmarshal(imageJSON, &v1Image) + if err != nil { + return nil, err + } + imageJSON, err = json.Marshal(v1Image) + if err != nil { + return nil, err + } + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(imageJSON, &c); err != nil { + return nil, err + } + + delete(c, "id") + delete(c, "parent") + delete(c, "Size") // Size is calculated from data on disk and is inconsistent + delete(c, "parent_id") + delete(c, "layer_id") + delete(c, "throwaway") + + c["rootfs"] = rawJSON(rootfs) + c["history"] = rawJSON(history) + + return json.Marshal(c) +} + +// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct +func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of
the + // image config. + var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + delete(configAsMap, "rootfs") + delete(configAsMap, "history") + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} + +// ValidateID checks whether an ID string is a valid image ID. +func ValidateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID '%s' is invalid ", id) + } + return nil +} From 694df3ff9f0f6b6acd4f3d866d443c740cce3f3f Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 18 Nov 2015 14:18:44 -0800 Subject: [PATCH 4/7] Add distribution package Signed-off-by: Aaron Lehmann --- .../fixtures/validate_manifest/bad_manifest | 38 ++ .../validate_manifest/extra_data_manifest | 46 ++ .../fixtures/validate_manifest/good_manifest | 38 ++ distribution/metadata/blobsum_service.go | 100 ++++ distribution/metadata/blobsum_service_test.go | 105 ++++ distribution/metadata/metadata.go | 65 +++ distribution/metadata/v1_id_service.go | 44 ++ distribution/metadata/v1_id_service_test.go | 83 +++ distribution/pool.go | 51 ++ distribution/pool_test.go | 28 + distribution/pull.go | 185 +++++++ distribution/pull_v1.go | 454 ++++++++++++++++ distribution/pull_v2.go | 512 ++++++++++++++++++ distribution/pull_v2_test.go | 174 ++++++ distribution/pull_v2_unix.go | 12 + distribution/pull_v2_windows.go | 29 + distribution/push.go | 179 ++++++ distribution/push_v1.go | 466 ++++++++++++++++ distribution/push_v2.go | 410 ++++++++++++++ distribution/push_v2_test.go | 176 ++++++ 
distribution/push_v2_unix.go | 12 + distribution/push_v2_windows.go | 28 + distribution/registry.go | 115 ++++ 23 files changed, 3350 insertions(+) create mode 100644 distribution/fixtures/validate_manifest/bad_manifest create mode 100644 distribution/fixtures/validate_manifest/extra_data_manifest create mode 100644 distribution/fixtures/validate_manifest/good_manifest create mode 100644 distribution/metadata/blobsum_service.go create mode 100644 distribution/metadata/blobsum_service_test.go create mode 100644 distribution/metadata/metadata.go create mode 100644 distribution/metadata/v1_id_service.go create mode 100644 distribution/metadata/v1_id_service_test.go create mode 100644 distribution/pool.go create mode 100644 distribution/pool_test.go create mode 100644 distribution/pull.go create mode 100644 distribution/pull_v1.go create mode 100644 distribution/pull_v2.go create mode 100644 distribution/pull_v2_test.go create mode 100644 distribution/pull_v2_unix.go create mode 100644 distribution/pull_v2_windows.go create mode 100644 distribution/push.go create mode 100644 distribution/push_v1.go create mode 100644 distribution/push_v2.go create mode 100644 distribution/push_v2_test.go create mode 100644 distribution/push_v2_unix.go create mode 100644 distribution/push_v2_windows.go create mode 100644 distribution/registry.go diff --git a/distribution/fixtures/validate_manifest/bad_manifest b/distribution/fixtures/validate_manifest/bad_manifest new file mode 100644 index 0000000000..a1f02a62a3 --- /dev/null +++ b/distribution/fixtures/validate_manifest/bad_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 2, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": 
"{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY 
file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/distribution/fixtures/validate_manifest/extra_data_manifest b/distribution/fixtures/validate_manifest/extra_data_manifest new file mode 100644 index 0000000000..beec19a801 --- /dev/null +++ b/distribution/fixtures/validate_manifest/extra_data_manifest @@ -0,0 +1,46 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": 
"{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY 
file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "fsLayers": [ + { + "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/distribution/fixtures/validate_manifest/good_manifest b/distribution/fixtures/validate_manifest/good_manifest new file mode 100644 index 0000000000..b107de3226 --- /dev/null +++ b/distribution/fixtures/validate_manifest/good_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { 
+ "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": 
"{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} \ No newline at end of file diff --git a/distribution/metadata/blobsum_service.go b/distribution/metadata/blobsum_service.go new file mode 100644 index 0000000000..88ed7bb197 --- 
/dev/null +++ b/distribution/metadata/blobsum_service.go @@ -0,0 +1,100 @@ +package metadata + +import ( + "encoding/json" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +// BlobSumService maps layer IDs to a set of known blobsums for +// the layer. +type BlobSumService struct { + store Store +} + +// maxBlobSums is the number of blobsums to keep per layer DiffID. +const maxBlobSums = 5 + +// NewBlobSumService creates a new blobsum mapping service. +func NewBlobSumService(store Store) *BlobSumService { + return &BlobSumService{ + store: store, + } +} + +func (blobserv *BlobSumService) diffIDNamespace() string { + return "blobsum-storage" +} + +func (blobserv *BlobSumService) blobSumNamespace() string { + return "blobsum-lookup" +} + +func (blobserv *BlobSumService) diffIDKey(diffID layer.DiffID) string { + return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() +} + +func (blobserv *BlobSumService) blobSumKey(blobsum digest.Digest) string { + return string(blobsum.Algorithm()) + "/" + blobsum.Hex() +} + +// GetBlobSums finds the blobsums associated with a layer DiffID. +func (blobserv *BlobSumService) GetBlobSums(diffID layer.DiffID) ([]digest.Digest, error) { + jsonBytes, err := blobserv.store.Get(blobserv.diffIDNamespace(), blobserv.diffIDKey(diffID)) + if err != nil { + return nil, err + } + + var blobsums []digest.Digest + if err := json.Unmarshal(jsonBytes, &blobsums); err != nil { + return nil, err + } + + return blobsums, nil +} + +// GetDiffID finds a layer DiffID from a blobsum hash. +func (blobserv *BlobSumService) GetDiffID(blobsum digest.Digest) (layer.DiffID, error) { + diffIDBytes, err := blobserv.store.Get(blobserv.blobSumNamespace(), blobserv.blobSumKey(blobsum)) + if err != nil { + return layer.DiffID(""), err + } + + return layer.DiffID(diffIDBytes), nil +} + +// Add associates a blobsum with a layer DiffID. If too many blobsums are +// present, the oldest one is dropped. 
+func (blobserv *BlobSumService) Add(diffID layer.DiffID, blobsum digest.Digest) error { + oldBlobSums, err := blobserv.GetBlobSums(diffID) + if err != nil { + oldBlobSums = nil + } + newBlobSums := make([]digest.Digest, 0, len(oldBlobSums)+1) + + // Copy all other blobsums to new slice + for _, oldSum := range oldBlobSums { + if oldSum != blobsum { + newBlobSums = append(newBlobSums, oldSum) + } + } + + newBlobSums = append(newBlobSums, blobsum) + + if len(newBlobSums) > maxBlobSums { + newBlobSums = newBlobSums[len(newBlobSums)-maxBlobSums:] + } + + jsonBytes, err := json.Marshal(newBlobSums) + if err != nil { + return err + } + + err = blobserv.store.Set(blobserv.diffIDNamespace(), blobserv.diffIDKey(diffID), jsonBytes) + if err != nil { + return err + } + + return blobserv.store.Set(blobserv.blobSumNamespace(), blobserv.blobSumKey(blobsum), []byte(diffID)) +} diff --git a/distribution/metadata/blobsum_service_test.go b/distribution/metadata/blobsum_service_test.go new file mode 100644 index 0000000000..dee64df1ee --- /dev/null +++ b/distribution/metadata/blobsum_service_test.go @@ -0,0 +1,105 @@ +package metadata + +import ( + "io/ioutil" + "os" + "reflect" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +func TestBlobSumService(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test") + if err != nil { + t.Fatalf("could not create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + metadataStore, err := NewFSMetadataStore(tmpDir) + if err != nil { + t.Fatalf("could not create metadata store: %v", err) + } + blobSumService := NewBlobSumService(metadataStore) + + testVectors := []struct { + diffID layer.DiffID + blobsums []digest.Digest + }{ + { + diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + blobsums: []digest.Digest{ + digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"), + }, + }, + { + diffID: 
layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + blobsums: []digest.Digest{ + digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"), + digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e"), + }, + }, + { + diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), + blobsums: []digest.Digest{ + digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"), + digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e"), + digest.Digest("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), + digest.Digest("sha256:8902a7ca89aabbb868835260912159026637634090dd8899eee969523252236e"), + digest.Digest("sha256:c84364306344ccc48532c52ff5209236273525231dddaaab53262322352883aa"), + digest.Digest("sha256:aa7583bbc87532a8352bbb72520a821b3623523523a8352523a52352aaa888fe"), + }, + }, + } + + // Set some associations + for _, vec := range testVectors { + for _, blobsum := range vec.blobsums { + err := blobSumService.Add(vec.diffID, blobsum) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + } + } + + // Check the correct values are read back + for _, vec := range testVectors { + blobsums, err := blobSumService.GetBlobSums(vec.diffID) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + expectedBlobsums := len(vec.blobsums) + if expectedBlobsums > 5 { + expectedBlobsums = 5 + } + if !reflect.DeepEqual(blobsums, vec.blobsums[len(vec.blobsums)-expectedBlobsums:len(vec.blobsums)]) { + t.Fatal("Get returned incorrect layer ID") + } + } + + // Test GetBlobSums on a nonexistent entry + _, err = blobSumService.GetBlobSums(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Test GetDiffID on a nonexistent entry + _, err = 
blobSumService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Overwrite one of the entries and read it back + err = blobSumService.Add(testVectors[1].diffID, testVectors[0].blobsums[0]) + if err != nil { + t.Fatalf("error calling Add: %v", err) + } + diffID, err := blobSumService.GetDiffID(testVectors[0].blobsums[0]) + if err != nil { + t.Fatalf("error calling GetDiffID: %v", err) + } + if diffID != testVectors[1].diffID { + t.Fatal("GetDiffID returned incorrect diffID") + } +} diff --git a/distribution/metadata/metadata.go b/distribution/metadata/metadata.go new file mode 100644 index 0000000000..ab9cc5b626 --- /dev/null +++ b/distribution/metadata/metadata.go @@ -0,0 +1,65 @@ +package metadata + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" +) + +// Store implements a K/V store for mapping distribution-related IDs +// to on-disk layer IDs and image IDs. The namespace identifies the type of +// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. +type Store interface { + // Get retrieves data by namespace and key. + Get(namespace string, key string) ([]byte, error) + // Set writes data indexed by namespace and key. + Set(namespace, key string, value []byte) error +} + +// FSMetadataStore uses the filesystem to associate metadata with layer and +// image IDs. +type FSMetadataStore struct { + sync.RWMutex + basePath string +} + +// NewFSMetadataStore creates a new filesystem-based metadata store. +func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { + if err := os.MkdirAll(basePath, 0700); err != nil { + return nil, err + } + return &FSMetadataStore{ + basePath: basePath, + }, nil +} + +func (store *FSMetadataStore) path(namespace, key string) string { + return filepath.Join(store.basePath, namespace, key) +} + +// Get retrieves data by namespace and key. 
The data is read from a file named +// after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { + store.RLock() + defer store.RUnlock() + + return ioutil.ReadFile(store.path(namespace, key)) +} + +// Set writes data indexed by namespace and key. The data is written to a file +// named after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + tempFilePath := path + ".tmp" + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(tempFilePath, value, 0644); err != nil { + return err + } + return os.Rename(tempFilePath, path) +} diff --git a/distribution/metadata/v1_id_service.go b/distribution/metadata/v1_id_service.go new file mode 100644 index 0000000000..4098f8db83 --- /dev/null +++ b/distribution/metadata/v1_id_service.go @@ -0,0 +1,44 @@ +package metadata + +import ( + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" +) + +// V1IDService maps v1 IDs to layers on disk. +type V1IDService struct { + store Store +} + +// NewV1IDService creates a new V1 ID mapping service. +func NewV1IDService(store Store) *V1IDService { + return &V1IDService{ + store: store, + } +} + +// namespace returns the namespace used by this service. +func (idserv *V1IDService) namespace() string { + return "v1id" +} + +// Get finds a layer by its V1 ID. +func (idserv *V1IDService) Get(v1ID, registry string) (layer.ChainID, error) { + if err := v1.ValidateID(v1ID); err != nil { + return layer.ChainID(""), err + } + + idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) + if err != nil { + return layer.ChainID(""), err + } + return layer.ChainID(idBytes), nil +} + +// Set associates an image with a V1 ID. 
+func (idserv *V1IDService) Set(v1ID, registry string, id layer.ChainID) error { + if err := v1.ValidateID(v1ID); err != nil { + return err + } + return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) +} diff --git a/distribution/metadata/v1_id_service_test.go b/distribution/metadata/v1_id_service_test.go new file mode 100644 index 0000000000..bf0f23a6dc --- /dev/null +++ b/distribution/metadata/v1_id_service_test.go @@ -0,0 +1,83 @@ +package metadata + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/layer" +) + +func TestV1IDService(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "v1-id-service-test") + if err != nil { + t.Fatalf("could not create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + metadataStore, err := NewFSMetadataStore(tmpDir) + if err != nil { + t.Fatalf("could not create metadata store: %v", err) + } + v1IDService := NewV1IDService(metadataStore) + + testVectors := []struct { + registry string + v1ID string + layerID layer.ChainID + }{ + { + registry: "registry1", + v1ID: "f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937", + layerID: layer.ChainID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + }, + { + registry: "registry2", + v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", + layerID: layer.ChainID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + }, + { + registry: "registry1", + v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", + layerID: layer.ChainID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), + }, + } + + // Set some associations + for _, vec := range testVectors { + err := v1IDService.Set(vec.v1ID, vec.registry, vec.layerID) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + } + + // Check the correct values are read back + for _, vec := range testVectors { + layerID, err := v1IDService.Get(vec.v1ID, vec.registry) + 
if err != nil { + t.Fatalf("error calling Get: %v", err) + } + if layerID != vec.layerID { + t.Fatal("Get returned incorrect layer ID") + } + } + + // Test Get on a nonexistent entry + _, err = v1IDService.Get("82379823067823853223359023576437723560923756b03560378f4497753917", "registry1") + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Overwrite one of the entries and read it back + err = v1IDService.Set(testVectors[0].v1ID, testVectors[0].registry, testVectors[1].layerID) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + layerID, err := v1IDService.Get(testVectors[0].v1ID, testVectors[0].registry) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + if layerID != testVectors[1].layerID { + t.Fatal("Get returned incorrect layer ID") + } +} diff --git a/distribution/pool.go b/distribution/pool.go new file mode 100644 index 0000000000..8c648f6e8b --- /dev/null +++ b/distribution/pool.go @@ -0,0 +1,51 @@ +package distribution + +import ( + "sync" + + "github.com/docker/docker/pkg/broadcaster" +) + +// A Pool manages concurrent pulls. It deduplicates in-progress downloads. +type Pool struct { + sync.Mutex + pullingPool map[string]*broadcaster.Buffered +} + +// NewPool creates a new Pool. +func NewPool() *Pool { + return &Pool{ + pullingPool: make(map[string]*broadcaster.Buffered), + } +} + +// add checks if a pull is already running, and returns (broadcaster, true) +// if a running operation is found. Otherwise, it creates a new one and returns +// (broadcaster, false). 
+func (pool *Pool) add(key string) (*broadcaster.Buffered, bool) { + pool.Lock() + defer pool.Unlock() + + if p, exists := pool.pullingPool[key]; exists { + return p, true + } + + broadcaster := broadcaster.NewBuffered() + pool.pullingPool[key] = broadcaster + + return broadcaster, false +} + +func (pool *Pool) removeWithError(key string, broadcasterResult error) error { + pool.Lock() + defer pool.Unlock() + if broadcaster, exists := pool.pullingPool[key]; exists { + broadcaster.CloseWithError(broadcasterResult) + delete(pool.pullingPool, key) + } + return nil +} + +func (pool *Pool) remove(key string) error { + return pool.removeWithError(key, nil) +} diff --git a/distribution/pool_test.go b/distribution/pool_test.go new file mode 100644 index 0000000000..80511e8342 --- /dev/null +++ b/distribution/pool_test.go @@ -0,0 +1,28 @@ +package distribution + +import ( + "testing" +) + +func TestPools(t *testing.T) { + p := NewPool() + + if _, found := p.add("test1"); found { + t.Fatal("Expected pull test1 not to be in progress") + } + if _, found := p.add("test2"); found { + t.Fatal("Expected pull test2 not to be in progress") + } + if _, found := p.add("test1"); !found { + t.Fatalf("Expected pull test1 to be in progress") + } + if err := p.remove("test2"); err != nil { + t.Fatal(err) + } + if err := p.remove("test2"); err != nil { + t.Fatal(err) + } + if err := p.remove("test1"); err != nil { + t.Fatal(err) + } +} diff --git a/distribution/pull.go b/distribution/pull.go new file mode 100644 index 0000000000..4232ce3ca1 --- /dev/null +++ b/distribution/pull.go @@ -0,0 +1,185 @@ +package distribution + +import ( + "fmt" + "io" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/daemon/events" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/streamformatter" +
	"github.com/docker/docker/registry"
+	"github.com/docker/docker/tag"
+)
+
+// ImagePullConfig stores pull configuration.
+type ImagePullConfig struct {
+	// MetaHeaders stores HTTP headers with metadata about the image
+	// (DockerHeaders with prefix X-Meta- in the request).
+	MetaHeaders map[string][]string
+	// AuthConfig holds authentication credentials for authenticating with
+	// the registry.
+	AuthConfig *cliconfig.AuthConfig
+	// OutStream is the output writer for showing the status of the pull
+	// operation.
+	OutStream io.Writer
+	// RegistryService is the registry service to use for TLS configuration
+	// and endpoint lookup.
+	RegistryService *registry.Service
+	// EventsService is the events service to use for logging.
+	EventsService *events.Events
+	// MetadataStore is the storage backend for distribution-specific
+	// metadata.
+	MetadataStore metadata.Store
+	// LayerStore manages layers.
+	LayerStore layer.Store
+	// ImageStore manages images.
+	ImageStore image.Store
+	// TagStore manages tags.
+	TagStore tag.Store
+	// Pool manages concurrent pulls.
+	Pool *Pool
+}
+
+// Puller is an interface that abstracts pulling for different API versions.
+type Puller interface {
+	// Pull tries to pull the image referenced by `tag`.
+	// Pull returns an error if any, as well as a boolean that determines
+	// whether to retry Pull on the next configured endpoint.
+	Pull(ref reference.Named) (fallback bool, err error)
+}
+
+// newPuller returns a Puller interface that will pull from either a v1 or v2
+// registry. The endpoint argument contains a Version field that determines
+// whether a v1 or v2 puller will be created. The other parameters are passed
+// through to the underlying puller implementation for use during the actual
+// pull operation.
+func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig, sf *streamformatter.StreamFormatter) (Puller, error) {
+	switch endpoint.Version {
+	case registry.APIVersion2:
+		return &v2Puller{
+			blobSumService: metadata.NewBlobSumService(imagePullConfig.MetadataStore),
+			endpoint:       endpoint,
+			config:         imagePullConfig,
+			sf:             sf,
+			repoInfo:       repoInfo,
+		}, nil
+	case registry.APIVersion1:
+		return &v1Puller{
+			v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore),
+			endpoint:    endpoint,
+			config:      imagePullConfig,
+			sf:          sf,
+			repoInfo:    repoInfo,
+		}, nil
+	}
+	return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
+}
+
+// Pull initiates a pull operation. image is the repository name to pull, and
+// tag may be either empty, or indicate a specific tag to pull.
+func Pull(ref reference.Named, imagePullConfig *ImagePullConfig) error {
+	var sf = streamformatter.NewJSONStreamFormatter()
+
+	// Resolve the Repository name from fqn to RepositoryInfo
+	repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref)
+	if err != nil {
+		return err
+	}
+
+	// makes sure name is not empty or `scratch`
+	if err := validateRepoName(repoInfo.LocalName.Name()); err != nil {
+		return err
+	}
+
+	endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo.CanonicalName)
+	if err != nil {
+		return err
+	}
+
+	logName := registry.NormalizeLocalReference(ref)
+
+	var (
+		// use a slice to append the error strings and return a joined string to caller
+		errors []string
+
+		// discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport
+		// By default it is false, which means that if a ErrNoSupport error is encountered, it will be saved in errors.
+		// As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of
+		// any subsequent ErrNoSupport errors in errors.
+		// It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be
+		// returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant
+		// error is the ones from v2 endpoints not v1.
+		discardNoSupportErrors bool
+	)
+	for _, endpoint := range endpoints {
+		logrus.Debugf("Trying to pull %s from %s %s", repoInfo.LocalName, endpoint.URL, endpoint.Version)
+
+		puller, err := newPuller(endpoint, repoInfo, imagePullConfig, sf)
+		if err != nil {
+			errors = append(errors, err.Error())
+			continue
+		}
+		if fallback, err := puller.Pull(ref); err != nil {
+			if fallback {
+				if _, ok := err.(registry.ErrNoSupport); !ok {
+					// Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors.
+					discardNoSupportErrors = true
+					// append subsequent errors
+					errors = append(errors, err.Error())
+				} else if !discardNoSupportErrors {
+					// Save the ErrNoSupport error, because it's either the first error or all encountered errors
+					// were also ErrNoSupport errors.
+					// append subsequent errors
+					errors = append(errors, err.Error())
+				}
+				continue
+			}
+			errors = append(errors, err.Error())
+			logrus.Debugf("Not continuing with error: %v", fmt.Errorf("%s", strings.Join(errors, "\n"))) // "%s" keeps '%' in error text literal
+			if len(errors) > 0 {
+				return fmt.Errorf("%s", strings.Join(errors, "\n"))
+			}
+		}
+
+		imagePullConfig.EventsService.Log("pull", logName.String(), "")
+		return nil
+	}
+
+	if len(errors) == 0 {
+		return fmt.Errorf("no endpoints found for %s", ref.String())
+	}
+
+	if len(errors) > 0 {
+		return fmt.Errorf("%s", strings.Join(errors, "\n"))
+	}
+	return nil
+}
+
+// writeStatus writes a status message to out. If layersDownloaded is true, the
+// status message indicates that a newer image was downloaded.
Otherwise, it +// indicates that the image is up to date. requestedTag is the tag the message +// will refer to. +func writeStatus(requestedTag string, out io.Writer, sf *streamformatter.StreamFormatter, layersDownloaded bool) { + if layersDownloaded { + out.Write(sf.FormatStatus("", "Status: Downloaded newer image for %s", requestedTag)) + } else { + out.Write(sf.FormatStatus("", "Status: Image is up to date for %s", requestedTag)) + } +} + +// validateRepoName validates the name of a repository. +func validateRepoName(name string) error { + if name == "" { + return fmt.Errorf("Repository name can't be empty") + } + if name == "scratch" { + return fmt.Errorf("'scratch' is a reserved name") + } + return nil +} diff --git a/distribution/pull_v1.go b/distribution/pull_v1.go new file mode 100644 index 0000000000..d79b517082 --- /dev/null +++ b/distribution/pull_v1.go @@ -0,0 +1,454 @@ +package distribution + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/url" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/progressreader" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" +) + +type v1Puller struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + config *ImagePullConfig + sf *streamformatter.StreamFormatter + repoInfo *registry.RepositoryInfo + session *registry.Session +} + +func (p *v1Puller) Pull(ref reference.Named) (fallback bool, err error) { + if _, isDigested := ref.(reference.Digested); isDigested { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return true, 
registry.ErrNoSupport{errors.New("Cannot pull by digest with v1 registry")} + } + + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return false, err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was ReceiveTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return true, err + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + logrus.Debugf("Fallback from error: %s", err) + return true, err + } + if err := p.pullRepository(ref); err != nil { + // TODO(dmcgowan): Check if should fallback + return false, err + } + out := p.config.OutStream + out.Write(p.sf.FormatStatus("", "%s: this image was pulled from a legacy registry. 
Important: This registry version will not be supported in future versions of docker.", p.repoInfo.CanonicalName.Name())) + + return false, nil +} + +func (p *v1Puller) pullRepository(ref reference.Named) error { + out := p.config.OutStream + out.Write(p.sf.FormatStatus("", "Pulling repository %s", p.repoInfo.CanonicalName.Name())) + + repoData, err := p.session.GetRepositoryData(p.repoInfo.RemoteName) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + return fmt.Errorf("Error: image %s not found", p.repoInfo.RemoteName.Name()) + } + // Unexpected HTTP error + return err + } + + logrus.Debugf("Retrieving the tag list") + var tagsList map[string]string + tagged, isTagged := ref.(reference.Tagged) + if !isTagged { + tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.RemoteName) + } else { + var tagID string + tagsList = make(map[string]string) + tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.RemoteName, tagged.Tag()) + if err == registry.ErrRepoNotFound { + return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.CanonicalName.Name()) + } + tagsList[tagged.Tag()] = tagID + } + if err != nil { + logrus.Errorf("unable to get remote tags: %s", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = ®istry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + errors := make(chan error) + layerDownloaded := make(chan struct{}) + + layersDownloaded := false + var wg sync.WaitGroup + for _, imgData := range repoData.ImgList { + if isTagged && imgData.Tag != tagged.Tag() { + continue + } + + wg.Add(1) + go func(img *registry.ImgData) { + p.downloadImage(out, repoData, img, layerDownloaded, errors) + wg.Done() + }(imgData) + } + + go func() { + wg.Wait() + close(errors) + }() + + var lastError error +selectLoop: + for { + select { + case err, ok := <-errors: + if !ok { + break selectLoop + } + lastError = err + case <-layerDownloaded: + layersDownloaded = true 
+		}
+	}
+
+	if lastError != nil {
+		return lastError
+	}
+
+	localNameRef := p.repoInfo.LocalName
+	if isTagged {
+		localNameRef, err = reference.WithTag(localNameRef, tagged.Tag())
+		if err != nil {
+			localNameRef = p.repoInfo.LocalName
+		}
+	}
+	writeStatus(localNameRef.String(), out, p.sf, layersDownloaded)
+	return nil
+}
+
+func (p *v1Puller) downloadImage(out io.Writer, repoData *registry.RepositoryData, img *registry.ImgData, layerDownloaded chan struct{}, errors chan error) {
+	if img.Tag == "" {
+		logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
+		return
+	}
+
+	localNameRef, err := reference.WithTag(p.repoInfo.LocalName, img.Tag)
+	if err != nil {
+		retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag)
+		logrus.Debug(retErr.Error())
+		errors <- retErr
+		return // localNameRef is invalid; do not continue with a zero-value reference
+	}
+
+	if err := v1.ValidateID(img.ID); err != nil {
+		errors <- err
+		return
+	}
+
+	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, p.repoInfo.CanonicalName.Name()), nil))
+	success := false
+	var lastErr error
+	var isDownloaded bool
+	for _, ep := range p.repoInfo.Index.Mirrors {
+		ep += "v1/"
+		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.CanonicalName.Name(), ep), nil))
+		if isDownloaded, err = p.pullImage(out, img.ID, ep, localNameRef); err != nil {
+			// Don't report errors when pulling from mirrors.
+			logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.CanonicalName.Name(), ep, err)
+			continue
+		}
+		if isDownloaded {
+			layerDownloaded <- struct{}{}
+		}
+		success = true
+		break
+	}
+	if !success {
+		for _, ep := range repoData.Endpoints {
+			out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.CanonicalName.Name(), ep), nil))
+			if isDownloaded, err = p.pullImage(out, img.ID, ep, localNameRef); err != nil {
+				// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
+				// As the error is also given to the output stream the user will see the error.
+				lastErr = err
+				out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.CanonicalName.Name(), ep, err), nil))
+				continue
+			}
+			if isDownloaded {
+				layerDownloaded <- struct{}{}
+			}
+			success = true
+			break
+		}
+	}
+	if !success {
+		err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.CanonicalName.Name(), lastErr)
+		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil))
+		errors <- err
+		return
+	}
+	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
+}
+
+func (p *v1Puller) pullImage(out io.Writer, v1ID, endpoint string, localNameRef reference.Named) (layersDownloaded bool, err error) {
+	var history []string
+	history, err = p.session.GetRemoteHistory(v1ID, endpoint)
+	if err != nil {
+		return false, err
+	}
+	if len(history) < 1 {
+		return false, fmt.Errorf("empty history for image %s", v1ID)
+	}
+	out.Write(p.sf.FormatProgress(stringid.TruncateID(v1ID), "Pulling dependent layers", nil))
+	// FIXME: Try to stream the images?
+ // FIXME: Launch the getRemoteImage() in goroutines + + var ( + referencedLayers []layer.Layer + parentID layer.ChainID + newHistory []image.History + img *image.V1Image + imgJSON []byte + imgSize int64 + ) + + defer func() { + for _, l := range referencedLayers { + layer.ReleaseAndLog(p.config.LayerStore, l) + } + }() + + layersDownloaded = false + + // Iterate over layers from top-most to bottom-most, checking if any + // already exist on disk. + var i int + for i = 0; i != len(history); i++ { + v1LayerID := history[i] + // Do we have a mapping for this particular v1 ID on this + // registry? + if layerID, err := p.v1IDService.Get(v1LayerID, p.repoInfo.Index.Name); err == nil { + // Does the layer actually exist + if l, err := p.config.LayerStore.Get(layerID); err == nil { + for j := i; j >= 0; j-- { + logrus.Debugf("Layer already exists: %s", history[j]) + out.Write(p.sf.FormatProgress(stringid.TruncateID(history[j]), "Already exists", nil)) + } + referencedLayers = append(referencedLayers, l) + parentID = layerID + break + } + } + } + + needsDownload := i + + // Iterate over layers, in order from bottom-most to top-most. Download + // config for all layers, and download actual layer data if needed. + for i = len(history) - 1; i >= 0; i-- { + v1LayerID := history[i] + imgJSON, imgSize, err = p.downloadLayerConfig(out, v1LayerID, endpoint) + if err != nil { + return layersDownloaded, err + } + + img = &image.V1Image{} + if err := json.Unmarshal(imgJSON, img); err != nil { + return layersDownloaded, err + } + + if i < needsDownload { + l, err := p.downloadLayer(out, v1LayerID, endpoint, parentID, imgSize, &layersDownloaded) + + // Note: This needs to be done even in the error case to avoid + // stale references to the layer. 
+ if l != nil { + referencedLayers = append(referencedLayers, l) + } + if err != nil { + return layersDownloaded, err + } + + parentID = l.ChainID() + } + + // Create a new-style config from the legacy configs + h, err := v1.HistoryFromConfig(imgJSON, false) + if err != nil { + return layersDownloaded, err + } + newHistory = append(newHistory, h) + } + + rootFS := image.NewRootFS() + l := referencedLayers[len(referencedLayers)-1] + for l != nil { + rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) + l = l.Parent() + } + + config, err := v1.MakeConfigFromV1Config(imgJSON, rootFS, newHistory) + if err != nil { + return layersDownloaded, err + } + + imageID, err := p.config.ImageStore.Create(config) + if err != nil { + return layersDownloaded, err + } + + if err := p.config.TagStore.Add(localNameRef, imageID, true); err != nil { + return layersDownloaded, err + } + + return layersDownloaded, nil +} + +func (p *v1Puller) downloadLayerConfig(out io.Writer, v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { + out.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Pulling metadata", nil)) + + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) + if err != nil && j == retries { + out.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Error pulling layer metadata", nil)) + return nil, 0, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + + return imgJSON, imgSize, nil + } + + // not reached + return nil, 0, nil +} + +func (p *v1Puller) downloadLayer(out io.Writer, v1LayerID, endpoint string, parentID layer.ChainID, layerSize int64, layersDownloaded *bool) (l layer.Layer, err error) { + // ensure no two downloads of the same layer happen at the same time + poolKey := "layer:" + v1LayerID + broadcaster, found := p.config.Pool.add(poolKey) + broadcaster.Add(out) + if found { + logrus.Debugf("Image 
(id: %s) pull is already running, skipping", v1LayerID) + if err = broadcaster.Wait(); err != nil { + return nil, err + } + layerID, err := p.v1IDService.Get(v1LayerID, p.repoInfo.Index.Name) + if err != nil { + return nil, err + } + // Does the layer actually exist + l, err := p.config.LayerStore.Get(layerID) + if err != nil { + return nil, err + } + return l, nil + } + + // This must use a closure so it captures the value of err when + // the function returns, not when the 'defer' is evaluated. + defer func() { + p.config.Pool.removeWithError(poolKey, err) + }() + + retries := 5 + for j := 1; j <= retries; j++ { + // Get the layer + status := "Pulling fs layer" + if j > 1 { + status = fmt.Sprintf("Pulling fs layer [retries: %d]", j) + } + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), status, nil)) + layerReader, err := p.session.GetRemoteImageLayer(v1LayerID, endpoint, layerSize) + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else if err != nil { + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Error pulling dependent layers", nil)) + return nil, err + } + *layersDownloaded = true + defer layerReader.Close() + + reader := progressreader.New(progressreader.Config{ + In: layerReader, + Out: broadcaster, + Formatter: p.sf, + Size: layerSize, + NewLines: false, + ID: stringid.TruncateID(v1LayerID), + Action: "Downloading", + }) + + inflatedLayerData, err := archive.DecompressStream(reader) + if err != nil { + return nil, fmt.Errorf("could not get decompression stream: %v", err) + } + + l, err := p.config.LayerStore.Register(inflatedLayerData, parentID) + if err != nil { + return nil, fmt.Errorf("failed to register layer: %v", err) + } + logrus.Debugf("layer %s registered successfully", l.DiffID()) + + if terr, ok := err.(net.Error); ok && terr.Timeout() && j < 
retries { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else if err != nil { + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Error downloading dependent layers", nil)) + return nil, err + } + + // Cache mapping from this v1 ID to content-addressable layer ID + if err := p.v1IDService.Set(v1LayerID, p.repoInfo.Index.Name, l.ChainID()); err != nil { + return nil, err + } + + broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Download complete", nil)) + broadcaster.Close() + return l, nil + } + + // not reached + return nil, nil +} diff --git a/distribution/pull_v2.go b/distribution/pull_v2.go new file mode 100644 index 0000000000..b27970d2c8 --- /dev/null +++ b/distribution/pull_v2.go @@ -0,0 +1,512 @@ +package distribution + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/broadcaster" + "github.com/docker/docker/pkg/progressreader" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v2Puller struct { + blobSumService *metadata.BlobSumService + endpoint registry.APIEndpoint + config *ImagePullConfig + sf *streamformatter.StreamFormatter + repoInfo *registry.RepositoryInfo + repo distribution.Repository + sessionID string +} + +func (p *v2Puller) Pull(ref reference.Named) (fallback bool, err error) { + // TODO(tiborvass): was ReceiveTimeout + p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, 
p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + logrus.Debugf("Error getting v2 registry: %v", err) + return true, err + } + + p.sessionID = stringid.GenerateRandomID() + + if err := p.pullV2Repository(ref); err != nil { + if registry.ContinueOnError(err) { + logrus.Debugf("Error trying v2 registry: %v", err) + return true, err + } + return false, err + } + return false, nil +} + +func (p *v2Puller) pullV2Repository(ref reference.Named) (err error) { + var refs []reference.Named + taggedName := p.repoInfo.LocalName + if tagged, isTagged := ref.(reference.Tagged); isTagged { + taggedName, err = reference.WithTag(p.repoInfo.LocalName, tagged.Tag()) + if err != nil { + return err + } + refs = []reference.Named{taggedName} + } else if digested, isDigested := ref.(reference.Digested); isDigested { + taggedName, err = reference.WithDigest(p.repoInfo.LocalName, digested.Digest()) + if err != nil { + return err + } + refs = []reference.Named{taggedName} + } else { + manSvc, err := p.repo.Manifests(context.Background()) + if err != nil { + return err + } + + tags, err := manSvc.Tags() + if err != nil { + return err + } + + // This probably becomes a lot nicer after the manifest + // refactor... + for _, tag := range tags { + tagRef, err := reference.WithTag(p.repoInfo.LocalName, tag) + if err != nil { + return err + } + refs = append(refs, tagRef) + } + } + + var layersDownloaded bool + for _, pullRef := range refs { + // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged + // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
+ pulledNew, err := p.pullV2Tag(p.config.OutStream, pullRef) + if err != nil { + return err + } + layersDownloaded = layersDownloaded || pulledNew + } + + writeStatus(taggedName.String(), p.config.OutStream, p.sf, layersDownloaded) + + return nil +} + +// downloadInfo is used to pass information from download to extractor +type downloadInfo struct { + tmpFile *os.File + digest digest.Digest + layer distribution.ReadSeekCloser + size int64 + err chan error + poolKey string + broadcaster *broadcaster.Buffered +} + +type errVerification struct{} + +func (errVerification) Error() string { return "verification failed" } + +func (p *v2Puller) download(di *downloadInfo) { + logrus.Debugf("pulling blob %q", di.digest) + + blobs := p.repo.Blobs(context.Background()) + + desc, err := blobs.Stat(context.Background(), di.digest) + if err != nil { + logrus.Debugf("Error statting layer: %v", err) + di.err <- err + return + } + di.size = desc.Size + + layerDownload, err := blobs.Open(context.Background(), di.digest) + if err != nil { + logrus.Debugf("Error fetching layer: %v", err) + di.err <- err + return + } + defer layerDownload.Close() + + verifier, err := digest.NewDigestVerifier(di.digest) + if err != nil { + di.err <- err + return + } + + digestStr := di.digest.String() + + reader := progressreader.New(progressreader.Config{ + In: ioutil.NopCloser(io.TeeReader(layerDownload, verifier)), + Out: di.broadcaster, + Formatter: p.sf, + Size: di.size, + NewLines: false, + ID: stringid.TruncateID(digestStr), + Action: "Downloading", + }) + io.Copy(di.tmpFile, reader) + + di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(digestStr), "Verifying Checksum", nil)) + + if !verifier.Verified() { + err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest) + logrus.Error(err) + di.err <- err + return + } + + di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(digestStr), "Download complete", nil)) + + logrus.Debugf("Downloaded %s to tempfile %s", 
digestStr, di.tmpFile.Name()) + di.layer = layerDownload + + di.err <- nil +} + +func (p *v2Puller) pullV2Tag(out io.Writer, ref reference.Named) (tagUpdated bool, err error) { + tagOrDigest := "" + if tagged, isTagged := ref.(reference.Tagged); isTagged { + tagOrDigest = tagged.Tag() + } else if digested, isDigested := ref.(reference.Digested); isDigested { + tagOrDigest = digested.Digest().String() + } else { + return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String()) + } + + logrus.Debugf("Pulling ref from V2 registry: %q", tagOrDigest) + + manSvc, err := p.repo.Manifests(context.Background()) + if err != nil { + return false, err + } + + unverifiedManifest, err := manSvc.GetByTag(tagOrDigest) + if err != nil { + return false, err + } + if unverifiedManifest == nil { + return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) + } + var verifiedManifest *schema1.Manifest + verifiedManifest, err = verifyManifest(unverifiedManifest, ref) + if err != nil { + return false, err + } + + rootFS := image.NewRootFS() + + if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil { + return false, err + } + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return false, err + } + + out.Write(p.sf.FormatStatus(tagOrDigest, "Pulling from %s", p.repo.Name())) + + var downloads []*downloadInfo + + defer func() { + for _, d := range downloads { + p.config.Pool.removeWithError(d.poolKey, err) + if d.tmpFile != nil { + d.tmpFile.Close() + if err := os.RemoveAll(d.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name()) + } + } + } + }() + + // Image history converted to the new format + var history []image.History + + poolKey := "v2layer:" + notFoundLocally := false + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so 
that the downloads slice gets ordered correctly. + for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + blobSum := verifiedManifest.FSLayers[i].BlobSum + poolKey += blobSum.String() + + var throwAway struct { + ThrowAway bool `json:"throwaway,omitempty"` + } + if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { + return false, err + } + + h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) + if err != nil { + return false, err + } + history = append(history, h) + + if throwAway.ThrowAway { + continue + } + + // Do we have a layer on disk corresponding to the set of + // blobsums up to this point? + if !notFoundLocally { + notFoundLocally = true + diffID, err := p.blobSumService.GetDiffID(blobSum) + if err == nil { + rootFS.Append(diffID) + if l, err := p.config.LayerStore.Get(rootFS.ChainID()); err == nil { + notFoundLocally = false + logrus.Debugf("Layer already exists: %s", blobSum.String()) + out.Write(p.sf.FormatProgress(stringid.TruncateID(blobSum.String()), "Already exists", nil)) + defer layer.ReleaseAndLog(p.config.LayerStore, l) + continue + } else { + rootFS.DiffIDs = rootFS.DiffIDs[:len(rootFS.DiffIDs)-1] + } + } + } + + out.Write(p.sf.FormatProgress(stringid.TruncateID(blobSum.String()), "Pulling fs layer", nil)) + + tmpFile, err := ioutil.TempFile("", "GetImageBlob") + if err != nil { + return false, err + } + + d := &downloadInfo{ + poolKey: poolKey, + digest: blobSum, + tmpFile: tmpFile, + // TODO: seems like this chan buffer solved hanging problem in go1.5, + // this can indicate some deeper problem that somehow we never take + // error from channel in loop below + err: make(chan error, 1), + } + + downloads = append(downloads, d) + + broadcaster, found := p.config.Pool.add(d.poolKey) + broadcaster.Add(out) + d.broadcaster = broadcaster + if found { + d.err <- nil + } else { + go p.download(d) + } + } + + for _, d := range downloads { + 
if err := <-d.err; err != nil { + return false, err + } + + if d.layer == nil { + // Wait for a different pull to download and extract + // this layer. + err = d.broadcaster.Wait() + if err != nil { + return false, err + } + + diffID, err := p.blobSumService.GetDiffID(d.digest) + if err != nil { + return false, err + } + rootFS.Append(diffID) + + l, err := p.config.LayerStore.Get(rootFS.ChainID()) + if err != nil { + return false, err + } + + defer layer.ReleaseAndLog(p.config.LayerStore, l) + + continue + } + + d.tmpFile.Seek(0, 0) + reader := progressreader.New(progressreader.Config{ + In: d.tmpFile, + Out: d.broadcaster, + Formatter: p.sf, + Size: d.size, + NewLines: false, + ID: stringid.TruncateID(d.digest.String()), + Action: "Extracting", + }) + + inflatedLayerData, err := archive.DecompressStream(reader) + if err != nil { + return false, fmt.Errorf("could not get decompression stream: %v", err) + } + + l, err := p.config.LayerStore.Register(inflatedLayerData, rootFS.ChainID()) + if err != nil { + return false, fmt.Errorf("failed to register layer: %v", err) + } + logrus.Debugf("layer %s registered successfully", l.DiffID()) + rootFS.Append(l.DiffID()) + + // Cache mapping from this layer's DiffID to the blobsum + if err := p.blobSumService.Add(l.DiffID(), d.digest); err != nil { + return false, err + } + + defer layer.ReleaseAndLog(p.config.LayerStore, l) + + d.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(d.digest.String()), "Pull complete", nil)) + d.broadcaster.Close() + tagUpdated = true + } + + config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), rootFS, history) + if err != nil { + return false, err + } + + imageID, err := p.config.ImageStore.Create(config) + if err != nil { + return false, err + } + + manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo.LocalName.Name()) + if err != nil { + return false, err + } + + // Check for new tag if no layers downloaded + var 
oldTagImageID image.ID + if !tagUpdated { + oldTagImageID, err = p.config.TagStore.Get(ref) + if err != nil || oldTagImageID != imageID { + tagUpdated = true + } + } + + if tagUpdated { + if err = p.config.TagStore.Add(ref, imageID, true); err != nil { + return false, err + } + } + + if manifestDigest != "" { + out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest)) + } + + return tagUpdated, nil +} + +func verifyManifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. If the + // digest cannot be verified, don't even bother with those other things. + if digested, isDigested := ref.(reference.Digested); isDigested { + verifier, err := digest.NewDigestVerifier(digested.Digest()) + if err != nil { + return nil, err + } + payload, err := signedManifest.Payload() + if err != nil { + // If this failed, the signatures section was corrupted + // or missing. Treat the entire manifest as the payload. 
+ payload = signedManifest.Raw + } + if _, err := verifier.Write(payload); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return nil, err + } + + var verifiedManifest schema1.Manifest + if err = json.Unmarshal(payload, &verifiedManifest); err != nil { + return nil, err + } + m = &verifiedManifest + } else { + m = &signedManifest.Manifest + } + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String()) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String()) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String()) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *schema1.Manifest) error { + imgs := make([]*image.V1Image, len(m.FSLayers)) + for i := range m.FSLayers { + img := &image.V1Image{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := v1.ValidateID(img.ID); err != nil { + return err + } + } + + if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { + // Windows base layer can point to a base layer parent that is not in manifest. 
+ return errors.New("Invalid parent ID in the base layer of the image.") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) + } + } + + return nil +} diff --git a/distribution/pull_v2_test.go b/distribution/pull_v2_test.go new file mode 100644 index 0000000000..2647911408 --- /dev/null +++ b/distribution/pull_v2_test.go @@ -0,0 +1,174 @@ +package distribution + +import ( + "encoding/json" + "io/ioutil" + "reflect" + "strings" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" +) + +// TestFixManifestLayers checks that fixManifestLayers removes a duplicate +// layer, and that it makes no changes to the manifest when called a second +// time, after the duplicate is removed. 
+func TestFixManifestLayers(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain 
--untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + duplicateLayerManifestExpectedOutput := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: 
"{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: 
"{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" 
./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest") + } + + // Run fixManifestLayers again and confirm that it doesn't change the + // manifest (which no longer has duplicate layers). + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)") + } +} + +// TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails +// if the base layer configuration specifies a parent. 
+func TestFixManifestLayersBaseLayerParent(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X 
main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID in the base layer of the image.") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails +// if an image configuration specifies a parent that doesn't directly follow +// that (deduplicated) image in the image history. 
+func TestFixManifestLayersBadParent(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain 
--untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestValidateManifest verifies the validateManifest function +func TestValidateManifest(t *testing.T) { + expectedDigest, err := reference.Parse("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd") + if err != nil { + t.Fatal("could not parse reference") + } + expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + + // Good manifest + + goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var goodSignedManifest schema1.SignedManifest + err = json.Unmarshal(goodManifestBytes, &goodSignedManifest) + if err 
!= nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err := verifyManifest(&goodSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in good manifest") + } + + // "Extra data" manifest + + extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var extraDataSignedManifest schema1.SignedManifest + err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifyManifest(&extraDataSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in extra data manifest") + } + + // Bad manifest + + badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var badSignedManifest schema1.SignedManifest + err = json.Unmarshal(badManifestBytes, &badSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifyManifest(&badSignedManifest, expectedDigest) + if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") { + t.Fatal("expected validateManifest to fail with digest error") + } +} diff --git a/distribution/pull_v2_unix.go b/distribution/pull_v2_unix.go new file mode 100644 index 0000000000..9fbb875efc --- /dev/null +++ b/distribution/pull_v2_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package distribution + +import ( + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/docker/image" +) + +func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS 
*image.RootFS) error { + return nil +} diff --git a/distribution/pull_v2_windows.go b/distribution/pull_v2_windows.go new file mode 100644 index 0000000000..de99fc9d48 --- /dev/null +++ b/distribution/pull_v2_windows.go @@ -0,0 +1,29 @@ +// +build windows + +package distribution + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/docker/image" +) + +func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS *image.RootFS) error { + v1img := &image.V1Image{} + if err := json.Unmarshal([]byte(m.History[len(m.History)-1].V1Compatibility), v1img); err != nil { + return err + } + if v1img.Parent == "" { + return fmt.Errorf("Last layer %q does not have a base layer reference", v1img.ID) + } + // There must be an image that already references the baselayer. + for _, img := range is.Map() { + if img.RootFS.BaseLayerID() == v1img.Parent { + rootFS.BaseLayer = img.RootFS.BaseLayer + return nil + } + } + return fmt.Errorf("Invalid base layer %q", v1img.Parent) +} diff --git a/distribution/push.go b/distribution/push.go new file mode 100644 index 0000000000..ee41c2e1e3 --- /dev/null +++ b/distribution/push.go @@ -0,0 +1,179 @@ +package distribution + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/daemon/events" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/registry" + "github.com/docker/docker/tag" + "github.com/docker/libtrust" +) + +// ImagePushConfig stores push configuration. +type ImagePushConfig struct { + // MetaHeaders store HTTP headers with metadata about the image + // (DockerHeaders with prefix X-Meta- in the request). 
+ MetaHeaders map[string][]string + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *cliconfig.AuthConfig + // OutStream is the output writer for showing the status of the push + // operation. + OutStream io.Writer + // RegistryService is the registry service to use for TLS configuration + // and endpoint lookup. + RegistryService *registry.Service + // EventsService is the events service to use for logging. + EventsService *events.Events + // MetadataStore is the storage backend for distribution-specific + // metadata. + MetadataStore metadata.Store + // LayerStore manges layers. + LayerStore layer.Store + // ImageStore manages images. + ImageStore image.Store + // TagStore manages tags. + TagStore tag.Store + // TrustKey is the private key for legacy signatures. This is typically + // an ephemeral key, since these signatures are no longer verified. + TrustKey libtrust.PrivateKey +} + +// Pusher is an interface that abstracts pushing for different API versions. +type Pusher interface { + // Push tries to push the image configured at the creation of Pusher. + // Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint. + // + // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. + Push() (fallback bool, err error) +} + +const compressionBufSize = 32768 + +// NewPusher creates a new Pusher interface that will push to either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 pusher will be created. The other parameters are passed +// through to the underlying pusher implementation for use during the actual +// push operation. 
+func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig, sf *streamformatter.StreamFormatter) (Pusher, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Pusher{ + blobSumService: metadata.NewBlobSumService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + sf: sf, + layersPushed: make(map[digest.Digest]bool), + }, nil + case registry.APIVersion1: + return &v1Pusher{ + v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + sf: sf, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Push initiates a push operation on the repository named localName. +// ref is the specific variant of the image to be pushed. +// If no tag is provided, all tags will be pushed. +func Push(ref reference.Named, imagePushConfig *ImagePushConfig) error { + // FIXME: Allow to interrupt current push when new push of same image is done. 
+ + var sf = streamformatter.NewJSONStreamFormatter() + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo.CanonicalName) + if err != nil { + return err + } + + imagePushConfig.OutStream.Write(sf.FormatStatus("", "The push refers to a repository [%s]", repoInfo.CanonicalName)) + + associations := imagePushConfig.TagStore.ReferencesByName(repoInfo.LocalName) + if len(associations) == 0 { + return fmt.Errorf("Repository does not exist: %s", repoInfo.LocalName) + } + + var lastErr error + for _, endpoint := range endpoints { + logrus.Debugf("Trying to push %s to %s %s", repoInfo.CanonicalName, endpoint.URL, endpoint.Version) + + pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig, sf) + if err != nil { + lastErr = err + continue + } + if fallback, err := pusher.Push(); err != nil { + if fallback { + lastErr = err + continue + } + logrus.Debugf("Not continuing with error: %v", err) + return err + + } + + imagePushConfig.EventsService.Log("push", repoInfo.LocalName.Name(), "") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.CanonicalName) + } + return lastErr +} + +// compress returns an io.ReadCloser which will supply a compressed version of +// the provided Reader. The caller must close the ReadCloser after reading the +// compressed data. +// +// Note that this function returns a reader instead of taking a writer as an +// argument so that it can be used with httpBlobWriter's ReadFrom method. +// Using httpBlobWriter's Write method would send a PATCH request for every +// Write call. +func compress(in io.Reader) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + // Use a bufio.Writer to avoid excessive chunking in HTTP request. 
+ bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) + compressor := gzip.NewWriter(bufWriter) + + go func() { + _, err := io.Copy(compressor, in) + if err == nil { + err = compressor.Close() + } + if err == nil { + err = bufWriter.Flush() + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + }() + + return pipeReader +} diff --git a/distribution/push_v1.go b/distribution/push_v1.go new file mode 100644 index 0000000000..f6ffbb4445 --- /dev/null +++ b/distribution/push_v1.go @@ -0,0 +1,466 @@ +package distribution + +import ( + "fmt" + "io" + "io/ioutil" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progressreader" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" +) + +type v1Pusher struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + ref reference.Named + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + sf *streamformatter.StreamFormatter + session *registry.Session + + out io.Writer +} + +func (p *v1Pusher) Push() (fallback bool, err error) { + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return false, err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was NoTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders) + if err != nil { + 
logrus.Debugf("Could not get v1 endpoint: %v", err) + return true, err + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + return true, err + } + if err := p.pushRepository(); err != nil { + // TODO(dmcgowan): Check if should fallback + return false, err + } + return false, nil +} + +// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an +// image being pushed to a v1 registry. +type v1Image interface { + Config() []byte + Layer() layer.Layer + V1ID() string +} + +type v1ImageCommon struct { + layer layer.Layer + config []byte + v1ID string +} + +func (common *v1ImageCommon) Config() []byte { + return common.config +} + +func (common *v1ImageCommon) V1ID() string { + return common.v1ID +} + +func (common *v1ImageCommon) Layer() layer.Layer { + return common.layer +} + +// v1TopImage defines a runnable (top layer) image being pushed to a v1 +// registry. +type v1TopImage struct { + v1ImageCommon + imageID image.ID +} + +func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) { + v1ID := digest.Digest(imageID).Hex() + parentV1ID := "" + if parent != nil { + parentV1ID = parent.V1ID() + } + + config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false) + if err != nil { + return nil, err + } + + return &v1TopImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: config, + layer: l, + }, + imageID: imageID, + }, nil +} + +// v1DependencyImage defines a dependency layer being pushed to a v1 registry. 
+type v1DependencyImage struct { + v1ImageCommon +} + +func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1DependencyImage, error) { + v1ID := digest.Digest(l.ChainID()).Hex() + + config := "" + if parent != nil { + config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID()) + } else { + config = fmt.Sprintf(`{"id":"%s"}`, v1ID) + } + return &v1DependencyImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: []byte(config), + layer: l, + }, + }, nil +} + +// Retrieve the all the images to be uploaded in the correct order +func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []layer.Layer, err error) { + tagsByImage = make(map[image.ID][]string) + + // Ignore digest references + _, isDigested := p.ref.(reference.Digested) + if isDigested { + return + } + + tagged, isTagged := p.ref.(reference.Tagged) + if isTagged { + // Push a specific tag + var imgID image.ID + imgID, err = p.config.TagStore.Get(p.ref) + if err != nil { + return + } + + imageList, err = p.imageListForTag(imgID, nil, &referencedLayers) + if err != nil { + return + } + + tagsByImage[imgID] = []string{tagged.Tag()} + + return + } + + imagesSeen := make(map[image.ID]struct{}) + dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage) + + associations := p.config.TagStore.ReferencesByName(p.ref) + for _, association := range associations { + if tagged, isTagged = association.Ref.(reference.Tagged); !isTagged { + // Ignore digest references. 
+ continue + } + + tagsByImage[association.ImageID] = append(tagsByImage[association.ImageID], tagged.Tag()) + + if _, present := imagesSeen[association.ImageID]; present { + // Skip generating image list for already-seen image + continue + } + imagesSeen[association.ImageID] = struct{}{} + + imageListForThisTag, err := p.imageListForTag(association.ImageID, dependenciesSeen, &referencedLayers) + if err != nil { + return nil, nil, nil, err + } + + // append to main image list + imageList = append(imageList, imageListForThisTag...) + } + if len(imageList) == 0 { + return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag") + } + logrus.Debugf("Image list: %v", imageList) + logrus.Debugf("Tags by image: %v", tagsByImage) + + return +} + +func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]layer.Layer) (imageListForThisTag []v1Image, err error) { + img, err := p.config.ImageStore.Get(imgID) + if err != nil { + return nil, err + } + + topLayerID := img.RootFS.ChainID() + + var l layer.Layer + if topLayerID == "" { + l = layer.EmptyLayer + } else { + l, err = p.config.LayerStore.Get(topLayerID) + *referencedLayers = append(*referencedLayers, l) + if err != nil { + return nil, fmt.Errorf("failed to get top layer from image: %v", err) + } + } + + dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen) + if err != nil { + return nil, err + } + + topImage, err := newV1TopImage(imgID, img, l, parent) + if err != nil { + return nil, err + } + + imageListForThisTag = append(dependencyImages, topImage) + + return +} + +func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage, err error) { + if l == nil { + return nil, nil, nil + } + + imageListForThisTag, parent, err = generateDependencyImages(l.Parent(), dependenciesSeen) + + if dependenciesSeen 
!= nil { + if dependencyImage, present := dependenciesSeen[l.ChainID()]; present { + // This layer is already on the list, we can ignore it + // and all its parents. + return imageListForThisTag, dependencyImage, nil + } + } + + dependencyImage, err := newV1DependencyImage(l, parent) + if err != nil { + return nil, nil, err + } + imageListForThisTag = append(imageListForThisTag, dependencyImage) + + if dependenciesSeen != nil { + dependenciesSeen[l.ChainID()] = dependencyImage + } + + return imageListForThisTag, dependencyImage, nil +} + +// createImageIndex returns an index of an image's layer IDs and tags. +func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData { + var imageIndex []*registry.ImgData + for _, img := range images { + v1ID := img.V1ID() + + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + if tags, hasTags := tags[topImage.imageID]; hasTags { + // If an image has tags you must add an entry in the image index + // for each tag + for _, tag := range tags { + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: tag, + }) + } + continue + } + } + + // If the image does not have a tag it still needs to be sent to the + // registry with an empty tag so that it is associated with the repository + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: "", + }) + } + return imageIndex +} + +// lookupImageOnEndpoint checks the specified endpoint to see if an image exists +// and if it is absent then it sends the image id to the channel to be pushed. 
+func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) { + defer wg.Done() + for image := range images { + v1ID := image.V1ID() + if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil { + logrus.Errorf("Error in LookupRemoteImage: %s", err) + imagesToPush <- v1ID + } else { + p.out.Write(p.sf.FormatStatus("", "Image %s already pushed, skipping", stringid.TruncateID(v1ID))) + } + } +} + +func (p *v1Pusher) pushImageToEndpoint(endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error { + workerCount := len(imageList) + // start a maximum of 5 workers to check if images exist on the specified endpoint. + if workerCount > 5 { + workerCount = 5 + } + var ( + wg = &sync.WaitGroup{} + imageData = make(chan v1Image, workerCount*2) + imagesToPush = make(chan string, workerCount*2) + pushes = make(chan map[string]struct{}, 1) + ) + for i := 0; i < workerCount; i++ { + wg.Add(1) + go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush) + } + // start a go routine that consumes the images to push + go func() { + shouldPush := make(map[string]struct{}) + for id := range imagesToPush { + shouldPush[id] = struct{}{} + } + pushes <- shouldPush + }() + for _, v1Image := range imageList { + imageData <- v1Image + } + // close the channel to notify the workers that there will be no more images to check. + close(imageData) + wg.Wait() + close(imagesToPush) + // wait for all the images that require pushes to be collected into a consumable map. + shouldPush := <-pushes + // finish by pushing any images and tags to the endpoint. The order that the images are pushed + // is very important that is why we are still iterating over the ordered list of imageIDs. + for _, img := range imageList { + v1ID := img.V1ID() + if _, push := shouldPush[v1ID]; push { + if _, err := p.pushImage(img, endpoint); err != nil { + // FIXME: Continue on error? 
+ return err + } + } + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + for _, tag := range tags[topImage.imageID] { + p.out.Write(p.sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+p.repoInfo.RemoteName.Name()+"/tags/"+tag)) + if err := p.session.PushRegistryTag(p.repoInfo.RemoteName, v1ID, tag, endpoint); err != nil { + return err + } + } + } + } + return nil +} + +// pushRepository pushes layers that do not already exist on the registry. +func (p *v1Pusher) pushRepository() error { + p.out = ioutils.NewWriteFlusher(p.config.OutStream) + imgList, tags, referencedLayers, err := p.getImageList() + defer func() { + for _, l := range referencedLayers { + p.config.LayerStore.Release(l) + } + }() + if err != nil { + return err + } + p.out.Write(p.sf.FormatStatus("", "Sending image list")) + + imageIndex := createImageIndex(imgList, tags) + for _, data := range imageIndex { + logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) + } + + // Register all the images in a repository with the registry + // If an image is not in this list it will not be associated with the repository + repoData, err := p.session.PushImageJSONIndex(p.repoInfo.RemoteName, imageIndex, false, nil) + if err != nil { + return err + } + p.out.Write(p.sf.FormatStatus("", "Pushing repository %s", p.repoInfo.CanonicalName)) + // push the repository to each of the endpoints only if it does not exist. 
+ for _, endpoint := range repoData.Endpoints { + if err := p.pushImageToEndpoint(endpoint, imgList, tags, repoData); err != nil { + return err + } + } + _, err = p.session.PushImageJSONIndex(p.repoInfo.RemoteName, imageIndex, true, repoData.Endpoints) + return err +} + +func (p *v1Pusher) pushImage(v1Image v1Image, ep string) (checksum string, err error) { + v1ID := v1Image.V1ID() + + jsonRaw := v1Image.Config() + p.out.Write(p.sf.FormatProgress(stringid.TruncateID(v1ID), "Pushing", nil)) + + // General rule is to use ID for graph accesses and compatibilityID for + // calls to session.registry() + imgData := ®istry.ImgData{ + ID: v1ID, + } + + // Send the json + if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { + if err == registry.ErrAlreadyExists { + p.out.Write(p.sf.FormatProgress(stringid.TruncateID(v1ID), "Image already pushed, skipping", nil)) + return "", nil + } + return "", err + } + + l := v1Image.Layer() + + arch, err := l.TarStream() + if err != nil { + return "", err + } + + // don't care if this fails; best effort + size, _ := l.Size() + + // Send the layer + logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size) + + reader := progressreader.New(progressreader.Config{ + In: ioutil.NopCloser(arch), + Out: p.out, + Formatter: p.sf, + Size: size, + NewLines: false, + ID: stringid.TruncateID(v1ID), + Action: "Pushing", + }) + + checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw) + if err != nil { + return "", err + } + imgData.Checksum = checksum + imgData.ChecksumPayload = checksumPayload + // Send the checksum + if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil { + return "", err + } + + if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.ChainID()); err != nil { + logrus.Warnf("Could not set v1 ID mapping: %v", err) + } + + p.out.Write(p.sf.FormatProgress(stringid.TruncateID(v1ID), "Image successfully pushed", nil)) + return imgData.Checksum, nil 
+} diff --git a/distribution/push_v2.go b/distribution/push_v2.go new file mode 100644 index 0000000000..15edf2e2b3 --- /dev/null +++ b/distribution/push_v2.go @@ -0,0 +1,410 @@ +package distribution + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progressreader" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/docker/docker/tag" + "golang.org/x/net/context" +) + +type v2Pusher struct { + blobSumService *metadata.BlobSumService + ref reference.Named + endpoint registry.APIEndpoint + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + sf *streamformatter.StreamFormatter + repo distribution.Repository + + // layersPushed is the set of layers known to exist on the remote side. + // This avoids redundant queries when pushing multiple tags that + // involve the same layers. 
+ layersPushed map[digest.Digest]bool +} + +func (p *v2Pusher) Push() (fallback bool, err error) { + p.repo, err = NewV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") + if err != nil { + logrus.Debugf("Error getting v2 registry: %v", err) + return true, err + } + + localName := p.repoInfo.LocalName.Name() + + var associations []tag.Association + if _, isTagged := p.ref.(reference.Tagged); isTagged { + imageID, err := p.config.TagStore.Get(p.ref) + if err != nil { + return false, fmt.Errorf("tag does not exist: %s", p.ref.String()) + } + + associations = []tag.Association{ + { + Ref: p.ref, + ImageID: imageID, + }, + } + } else { + // Pull all tags + associations = p.config.TagStore.ReferencesByName(p.ref) + } + if err != nil { + return false, fmt.Errorf("error getting tags for %s: %s", localName, err) + } + if len(associations) == 0 { + return false, fmt.Errorf("no tags to push for %s", localName) + } + + for _, association := range associations { + if err := p.pushV2Tag(association); err != nil { + return false, err + } + } + + return false, nil +} + +func (p *v2Pusher) pushV2Tag(association tag.Association) error { + ref := association.Ref + logrus.Debugf("Pushing repository: %s", ref.String()) + + img, err := p.config.ImageStore.Get(association.ImageID) + if err != nil { + return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err) + } + + out := p.config.OutStream + + var l layer.Layer + + topLayerID := img.RootFS.ChainID() + if topLayerID == "" { + l = layer.EmptyLayer + } else { + l, err = p.config.LayerStore.Get(topLayerID) + if err != nil { + return fmt.Errorf("failed to get top layer from image: %v", err) + } + defer layer.ReleaseAndLog(p.config.LayerStore, l) + } + + fsLayers := make(map[layer.DiffID]schema1.FSLayer) + + // Push empty layer if necessary + for _, h := range img.History { + if h.EmptyLayer { + dgst, err := p.pushLayerIfNecessary(out, layer.EmptyLayer) + if err != nil { + return 
err + } + p.layersPushed[dgst] = true + fsLayers[layer.EmptyLayer.DiffID()] = schema1.FSLayer{BlobSum: dgst} + break + } + } + + for i := 0; i < len(img.RootFS.DiffIDs); i++ { + dgst, err := p.pushLayerIfNecessary(out, l) + if err != nil { + return err + } + + p.layersPushed[dgst] = true + fsLayers[l.DiffID()] = schema1.FSLayer{BlobSum: dgst} + + l = l.Parent() + } + + var tag string + if tagged, isTagged := ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + m, err := CreateV2Manifest(p.repo.Name(), tag, img, fsLayers) + if err != nil { + return err + } + + logrus.Infof("Signed manifest for %s using daemon's key: %s", ref.String(), p.config.TrustKey.KeyID()) + signed, err := schema1.Sign(m, p.config.TrustKey) + if err != nil { + return err + } + + manifestDigest, manifestSize, err := digestFromManifest(signed, p.repo.Name()) + if err != nil { + return err + } + if manifestDigest != "" { + if tagged, isTagged := ref.(reference.Tagged); isTagged { + // NOTE: do not change this format without first changing the trust client + // code. This information is used to determine what was pushed and should be signed. + out.Write(p.sf.FormatStatus("", "%s: digest: %s size: %d", tagged.Tag(), manifestDigest, manifestSize)) + } + } + + manSvc, err := p.repo.Manifests(context.Background()) + if err != nil { + return err + } + return manSvc.Put(signed) +} + +func (p *v2Pusher) pushLayerIfNecessary(out io.Writer, l layer.Layer) (digest.Digest, error) { + logrus.Debugf("Pushing layer: %s", l.DiffID()) + + // Do we have any blobsums associated with this layer's DiffID? 
+ possibleBlobsums, err := p.blobSumService.GetBlobSums(l.DiffID()) + if err == nil { + dgst, exists, err := p.blobSumAlreadyExists(possibleBlobsums) + if err != nil { + out.Write(p.sf.FormatProgress(stringid.TruncateID(string(l.DiffID())), "Image push failed", nil)) + return "", err + } + if exists { + out.Write(p.sf.FormatProgress(stringid.TruncateID(string(l.DiffID())), "Layer already exists", nil)) + return dgst, nil + } + } + + // if digest was empty or not saved, or if blob does not exist on the remote repository, + // then push the blob. + pushDigest, err := p.pushV2Layer(p.repo.Blobs(context.Background()), l) + if err != nil { + return "", err + } + // Cache mapping from this layer's DiffID to the blobsum + if err := p.blobSumService.Add(l.DiffID(), pushDigest); err != nil { + return "", err + } + + return pushDigest, nil +} + +// blobSumAlreadyExists checks if the registry already know about any of the +// blobsums passed in the "blobsums" slice. If it finds one that the registry +// knows about, it returns the known digest and "true". +func (p *v2Pusher) blobSumAlreadyExists(blobsums []digest.Digest) (digest.Digest, bool, error) { + for _, dgst := range blobsums { + if p.layersPushed[dgst] { + // it is already known that the push is not needed and + // therefore doing a stat is unnecessary + return dgst, true, nil + } + _, err := p.repo.Blobs(context.Background()).Stat(context.Background(), dgst) + switch err { + case nil: + return dgst, true, nil + case distribution.ErrBlobUnknown: + // nop + default: + return "", false, err + } + } + return "", false, nil +} + +// CreateV2Manifest creates a V2 manifest from an image config and set of +// FSLayer digests. +// FIXME: This should be moved to the distribution repo, since it will also +// be useful for converting new manifests to the old format. 
+func CreateV2Manifest(name, tag string, img *image.Image, fsLayers map[layer.DiffID]schema1.FSLayer) (*schema1.Manifest, error) { + if len(img.History) == 0 { + return nil, errors.New("empty history when trying to create V2 manifest") + } + + // Generate IDs for each layer + // For non-top-level layers, create fake V1Compatibility strings that + // fit the format and don't collide with anything else, but don't + // result in runnable images on their own. + type v1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + } + + fsLayerList := make([]schema1.FSLayer, len(img.History)) + history := make([]schema1.History, len(img.History)) + + parent := "" + layerCounter := 0 + for i, h := range img.History { + if i == len(img.History)-1 { + break + } + + var diffID layer.DiffID + if h.EmptyLayer { + diffID = layer.EmptyLayer.DiffID() + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + diffID = img.RootFS.DiffIDs[layerCounter] + layerCounter++ + } + + fsLayer, present := fsLayers[diffID] + if !present { + return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String()) + } + dgst, err := digest.FromBytes([]byte(fsLayer.BlobSum.Hex() + " " + parent)) + if err != nil { + return nil, err + } + v1ID := dgst.Hex() + + v1Compatibility := v1Compatibility{ + ID: v1ID, + Parent: parent, + Comment: h.Comment, + Created: h.Created, + } + v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} + if h.EmptyLayer { + v1Compatibility.ThrowAway = true + } + jsonBytes, err := json.Marshal(&v1Compatibility) + if err != nil { + return nil, err + } + + reversedIndex := len(img.History) - i - 1 + history[reversedIndex].V1Compatibility 
= string(jsonBytes) + fsLayerList[reversedIndex] = fsLayer + + parent = v1ID + } + + latestHistory := img.History[len(img.History)-1] + + var diffID layer.DiffID + if latestHistory.EmptyLayer { + diffID = layer.EmptyLayer.DiffID() + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + diffID = img.RootFS.DiffIDs[layerCounter] + } + fsLayer, present := fsLayers[diffID] + if !present { + return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String()) + } + + dgst, err := digest.FromBytes([]byte(fsLayer.BlobSum.Hex() + " " + parent + " " + string(img.RawJSON()))) + if err != nil { + return nil, err + } + fsLayerList[0] = fsLayer + + // Top-level v1compatibility string should be a modified version of the + // image config. + transformedConfig, err := v1.MakeV1ConfigFromConfig(img, dgst.Hex(), parent, latestHistory.EmptyLayer) + if err != nil { + return nil, err + } + + history[0].V1Compatibility = string(transformedConfig) + + // windows-only baselayer setup + if err := setupBaseLayer(history, *img.RootFS); err != nil { + return nil, err + } + + return &schema1.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: name, + Tag: tag, + Architecture: img.Architecture, + FSLayers: fsLayerList, + History: history, + }, nil +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} + +func (p *v2Pusher) pushV2Layer(bs distribution.BlobService, l layer.Layer) (digest.Digest, error) { + out := p.config.OutStream + displayID := stringid.TruncateID(string(l.DiffID())) + + out.Write(p.sf.FormatProgress(displayID, "Preparing", nil)) + + arch, err := l.TarStream() + if err != nil { + return "", err + } + + // Send the layer + layerUpload, err := bs.Create(context.Background()) + if err != nil { + return "", err + } + defer layerUpload.Close() + + // 
don't care if this fails; best effort + size, _ := l.DiffSize() + + reader := progressreader.New(progressreader.Config{ + In: ioutil.NopCloser(arch), // we'll take care of close here. + Out: out, + Formatter: p.sf, + Size: size, + NewLines: false, + ID: displayID, + Action: "Pushing", + }) + + compressedReader := compress(reader) + + digester := digest.Canonical.New() + tee := io.TeeReader(compressedReader, digester.Hash()) + + out.Write(p.sf.FormatProgress(displayID, "Pushing", nil)) + nn, err := layerUpload.ReadFrom(tee) + compressedReader.Close() + if err != nil { + return "", err + } + + dgst := digester.Digest() + if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil { + return "", err + } + + logrus.Debugf("uploaded layer %s (%s), %d bytes", l.DiffID(), dgst, nn) + out.Write(p.sf.FormatProgress(displayID, "Pushed", nil)) + + return dgst, nil +} diff --git a/distribution/push_v2_test.go b/distribution/push_v2_test.go new file mode 100644 index 0000000000..ab9e6612c6 --- /dev/null +++ b/distribution/push_v2_test.go @@ -0,0 +1,176 @@ +package distribution + +import ( + "reflect" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" +) + +func TestCreateV2Manifest(t *testing.T) { + imgJSON := `{ + "architecture": "amd64", + "config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "echo hi" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "container": 
"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001", + "container_config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "created": "2015-11-04T23:06:32.365666163Z", + "docker_version": "1.9.0-dev", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + }, + { + "created": "2015-11-04T23:06:30.934316144Z", + "created_by": "/bin/sh -c #(nop) ENV derived=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:31.192097572Z", + "created_by": "/bin/sh -c #(nop) ENV asdf=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:32.083868454Z", + "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024" + }, + { + "created": "2015-11-04T23:06:32.365666163Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]", + "empty_layer": true + } + ], + "os": "linux", + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49" + ], + "type": "layers" + } +}` + + // To fill in rawJSON + img, err := image.NewFromJSON([]byte(imgJSON)) + if err != nil { + 
t.Fatalf("json decoding failed: %v", err) + } + + fsLayers := map[layer.DiffID]schema1.FSLayer{ + layer.DiffID("sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1"): {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + layer.DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"): {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + layer.DiffID("sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"): {BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + manifest, err := CreateV2Manifest("testrepo", "testtag", img, fsLayers) + if err != nil { + t.Fatalf("CreateV2Manifest returned error: %v", err) + } + + if manifest.Versioned.SchemaVersion != 1 { + t.Fatal("SchemaVersion != 1") + } + if manifest.Name != "testrepo" { + t.Fatal("incorrect name in manifest") + } + if manifest.Tag != "testtag" { + t.Fatal("incorrect tag in manifest") + } + if manifest.Architecture != "amd64" { + t.Fatal("incorrect arch in manifest") + } + + expectedFSLayers := []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + {BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + if len(manifest.FSLayers) != len(expectedFSLayers) { + t.Fatalf("wrong number of FSLayers: %d", len(manifest.FSLayers)) + } + if !reflect.DeepEqual(manifest.FSLayers, 
expectedFSLayers) { + t.Fatal("wrong FSLayers list") + } + + expectedV1Compatibility := []string{ + `{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"d728140d3fd23dfcac505954af0b2224b3579b177029eded62916579eb19ac64","os":"linux","parent":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","throwaway":true}`, + `{"id":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","parent":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]}}`, + `{"id":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","parent":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`, + 
`{"id":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV derived=true"]},"throwaway":true}`, + `{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`, + `{"id":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:54.690851953Z","container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"]}}`, + } + + if len(manifest.History) != len(expectedV1Compatibility) { + t.Fatalf("wrong number of history entries: %d", len(manifest.History)) + } + for i := range expectedV1Compatibility { + if manifest.History[i].V1Compatibility != expectedV1Compatibility[i] { + t.Fatalf("wrong V1Compatibility %d. 
expected:\n%s\ngot:\n%s", i, expectedV1Compatibility[i], manifest.History[i].V1Compatibility) + } + } +} diff --git a/distribution/push_v2_unix.go b/distribution/push_v2_unix.go new file mode 100644 index 0000000000..345085657e --- /dev/null +++ b/distribution/push_v2_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package distribution + +import ( + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/docker/image" +) + +func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error { + return nil +} diff --git a/distribution/push_v2_windows.go b/distribution/push_v2_windows.go new file mode 100644 index 0000000000..59ad6545f6 --- /dev/null +++ b/distribution/push_v2_windows.go @@ -0,0 +1,28 @@ +// +build windows + +package distribution + +import ( + "encoding/json" + + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/docker/image" +) + +func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error { + var v1Config map[string]*json.RawMessage + if err := json.Unmarshal([]byte(history[len(history)-1].V1Compatibility), &v1Config); err != nil { + return err + } + baseID, err := json.Marshal(rootFS.BaseLayerID()) + if err != nil { + return err + } + v1Config["parent"] = (*json.RawMessage)(&baseID) + configJSON, err := json.Marshal(v1Config) + if err != nil { + return err + } + history[len(history)-1].V1Compatibility = string(configJSON) + return nil +} diff --git a/distribution/registry.go b/distribution/registry.go new file mode 100644 index 0000000000..5ec45530bc --- /dev/null +++ b/distribution/registry.go @@ -0,0 +1,115 @@ +package distribution + +import ( + "errors" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + 
"github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type dumbCredentialStore struct { + auth *cliconfig.AuthConfig +} + +func (dcs dumbCredentialStore) Basic(*url.URL) (string, string) { + return dcs.auth.Username, dcs.auth.Password +} + +// NewV2Repository returns a repository (v2 only). It creates a HTTP transport +// providing timeout settings and authentication support, and also verifies the +// remote API version. +func NewV2Repository(repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *cliconfig.AuthConfig, actions ...string) (distribution.Repository, error) { + ctx := context.Background() + + repoName := repoInfo.CanonicalName + // If endpoint does not support CanonicalName, use the RemoteName instead + if endpoint.TrimHostname { + repoName = repoInfo.RemoteName + } + + // TODO(dmcgowan): Call close idle connections when complete, use keep alive + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: endpoint.TLSConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + modifiers := registry.DockerHeaders(metaHeaders) + authTransport := transport.NewTransport(base, modifiers...) 
+ pingClient := &http.Client{ + Transport: authTransport, + Timeout: 5 * time.Second, + } + endpointStr := strings.TrimRight(endpoint.URL, "/") + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + return nil, err + } + resp, err := pingClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + versions := auth.APIVersions(resp, endpoint.VersionHeader) + if endpoint.VersionHeader != "" && len(endpoint.Versions) > 0 { + var foundVersion bool + for _, version := range endpoint.Versions { + for _, pingVersion := range versions { + if version == pingVersion { + foundVersion = true + } + } + } + if !foundVersion { + return nil, errors.New("endpoint does not support v2 API") + } + } + + challengeManager := auth.NewSimpleChallengeManager() + if err := challengeManager.AddResponse(resp); err != nil { + return nil, err + } + + creds := dumbCredentialStore{auth: authConfig} + tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName.Name(), actions...) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + tr := transport.NewTransport(base, modifiers...) + + return client.NewRepository(ctx, repoName.Name(), endpoint.URL, tr) +} + +func digestFromManifest(m *schema1.SignedManifest, localName string) (digest.Digest, int, error) { + payload, err := m.Payload() + if err != nil { + // If this failed, the signatures section was corrupted + // or missing. Treat the entire manifest as the payload. 
+ payload = m.Raw + } + manifestDigest, err := digest.FromBytes(payload) + if err != nil { + logrus.Infof("Could not compute manifest digest for %s:%s : %v", localName, m.Tag, err) + } + return manifestDigest, len(payload), nil +} From 5fc0e1f324b05ce8cc7536cd807995c629e0843d Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Wed, 18 Nov 2015 14:19:45 -0800 Subject: [PATCH 5/7] Add migratev1 package Signed-off-by: Tonis Tiigi --- migrate/v1/migratev1.go | 364 ++++++++++++++++++++++++++++++++ migrate/v1/migratev1_test.go | 389 +++++++++++++++++++++++++++++++++++ 2 files changed, 753 insertions(+) create mode 100644 migrate/v1/migratev1.go create mode 100644 migrate/v1/migratev1_test.go diff --git a/migrate/v1/migratev1.go b/migrate/v1/migratev1.go new file mode 100644 index 0000000000..86d99e6d06 --- /dev/null +++ b/migrate/v1/migratev1.go @@ -0,0 +1,364 @@ +package v1 + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "encoding/json" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + imagev1 "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/tag" +) + +type graphIDRegistrar interface { + RegisterByGraphID(string, layer.ChainID, string) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type graphIDMounter interface { + MountByGraphID(string, string, layer.ChainID) (layer.RWLayer, error) + Unmount(string) error +} + +const ( + graphDirName = "graph" + tarDataFileName = "tar-data.json.gz" + migrationFileName = ".migration-v1-images.json" + migrationTagsFileName = ".migration-v1-tags" + containersDirName = "containers" + configFileNameLegacy = "config.json" + configFileName = "config.v2.json" + repositoriesFilePrefixLegacy = "repositories-" +) + +var ( + errUnsupported = errors.New("migration is not supported") +) + +// 
Migrate takes an old graph directory and transforms the metadata into the +// new format. +func Migrate(root, driverName string, ls layer.Store, is image.Store, ts tag.Store, ms metadata.Store) error { + mappings := make(map[string]image.ID) + + if registrar, ok := ls.(graphIDRegistrar); !ok { + return errUnsupported + } else if err := migrateImages(root, registrar, is, ms, mappings); err != nil { + return err + } + + if mounter, ok := ls.(graphIDMounter); !ok { + return errUnsupported + } else if err := migrateContainers(root, mounter, is, mappings); err != nil { + return err + } + + if err := migrateTags(root, driverName, ts, mappings); err != nil { + return err + } + + return nil +} + +func migrateImages(root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) error { + graphDir := filepath.Join(root, graphDirName) + if _, err := os.Lstat(graphDir); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + mfile := filepath.Join(root, migrationFileName) + f, err := os.Open(mfile) + if err != nil && !os.IsNotExist(err) { + return err + } else if err == nil { + err := json.NewDecoder(f).Decode(&mappings) + if err != nil { + f.Close() + return err + } + f.Close() + } + + dir, err := ioutil.ReadDir(graphDir) + if err != nil { + return err + } + for _, v := range dir { + v1ID := v.Name() + if err := imagev1.ValidateID(v1ID); err != nil { + continue + } + if _, exists := mappings[v1ID]; exists { + continue + } + if err := migrateImage(v1ID, root, ls, is, ms, mappings); err != nil { + continue + } + } + + f, err = os.OpenFile(mfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + if err := json.NewEncoder(f).Encode(mappings); err != nil { + return err + } + + return nil +} + +func migrateContainers(root string, ls graphIDMounter, is image.Store, imageMappings map[string]image.ID) error { + containersDir := filepath.Join(root, containersDirName) + dir, err := 
ioutil.ReadDir(containersDir) + if err != nil { + return err + } + for _, v := range dir { + id := v.Name() + + if _, err := os.Stat(filepath.Join(containersDir, id, configFileName)); err == nil { + continue + } + + containerJSON, err := ioutil.ReadFile(filepath.Join(containersDir, id, configFileNameLegacy)) + if err != nil { + return err + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(containerJSON, &c); err != nil { + return err + } + + imageStrJSON, ok := c["Image"] + if !ok { + return fmt.Errorf("invalid container configuration for %v", id) + } + + var image string + if err := json.Unmarshal([]byte(*imageStrJSON), &image); err != nil { + return err + } + imageID, ok := imageMappings[image] + if !ok { + logrus.Errorf("image not migrated %v", imageID) // non-fatal error + continue + } + + c["Image"] = rawJSON(imageID) + + containerJSON, err = json.Marshal(c) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(containersDir, id, configFileName), containerJSON, 0600); err != nil { + return err + } + + img, err := is.Get(imageID) + if err != nil { + return err + } + + _, err = ls.MountByGraphID(id, id, img.RootFS.ChainID()) + if err != nil { + return err + } + + err = ls.Unmount(id) + if err != nil { + return err + } + + logrus.Infof("migrated container %s to point to %s", id, imageID) + + } + return nil +} + +type tagAdder interface { + Add(ref reference.Named, id image.ID, force bool) error +} + +func migrateTags(root, driverName string, ts tagAdder, mappings map[string]image.ID) error { + migrationFile := filepath.Join(root, migrationTagsFileName) + if _, err := os.Lstat(migrationFile); !os.IsNotExist(err) { + return err + } + + type repositories struct { + Repositories map[string]map[string]string + } + + var repos repositories + + f, err := os.Open(filepath.Join(root, repositoriesFilePrefixLegacy+driverName)) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + if err 
:= json.NewDecoder(f).Decode(&repos); err != nil { + return err + } + + for name, repo := range repos.Repositories { + for tag, id := range repo { + if strongID, exists := mappings[id]; exists { + ref, err := reference.WithName(name) + if err != nil { + logrus.Errorf("migrate tags: invalid name %q, %q", name, err) + continue + } + if dgst, err := digest.ParseDigest(tag); err == nil { + ref, err = reference.WithDigest(ref, dgst) + if err != nil { + logrus.Errorf("migrate tags: invalid digest %q, %q", dgst, err) + continue + } + } else { + ref, err = reference.WithTag(ref, tag) + if err != nil { + logrus.Errorf("migrate tags: invalid tag %q, %q", tag, err) + continue + } + } + if err := ts.Add(ref, strongID, false); err != nil { + logrus.Errorf("can't migrate tag %q for %q, err: %q", ref.String(), strongID, err) + } + logrus.Infof("migrated tag %s:%s to point to %s", name, tag, strongID) + } + } + } + + mf, err := os.Create(migrationFile) + if err != nil { + return err + } + mf.Close() + + return nil +} + +func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) { + defer func() { + if err != nil { + logrus.Errorf("migration failed for %v, err: %v", id, err) + } + }() + + jsonFile := filepath.Join(root, graphDirName, id, "json") + imageJSON, err := ioutil.ReadFile(jsonFile) + if err != nil { + return err + } + var parent struct { + Parent string + ParentID digest.Digest `json:"parent_id"` + } + if err := json.Unmarshal(imageJSON, &parent); err != nil { + return err + } + if parent.Parent == "" && parent.ParentID != "" { // v1.9 + parent.Parent = parent.ParentID.Hex() + } + // compatibilityID for parent + parentCompatibilityID, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "parent")) + if err == nil && len(parentCompatibilityID) > 0 { + parent.Parent = string(parentCompatibilityID) + } + + var parentID image.ID + if parent.Parent != "" { + var exists bool + if parentID, exists = 
mappings[parent.Parent]; !exists { + if err := migrateImage(parent.Parent, root, ls, is, ms, mappings); err != nil { + // todo: fail or allow broken chains? + return err + } + parentID = mappings[parent.Parent] + } + } + + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), filepath.Join(filepath.Join(root, graphDirName, id, tarDataFileName))) + if err != nil { + return err + } + logrus.Infof("migrated layer %s to %s", id, layer.DiffID()) + + h, err := imagev1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + rootFS.Append(layer.DiffID()) + + config, err := imagev1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + strongID, err := is.Create(config) + if err != nil { + return err + } + logrus.Infof("migrated image %s to %s", id, strongID) + + if parentID != "" { + if err := is.SetParent(strongID, parentID); err != nil { + return err + } + } + + checksum, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "checksum")) + if err == nil { // best effort + dgst, err := digest.ParseDigest(string(checksum)) + if err == nil { + blobSumService := metadata.NewBlobSumService(ms) + blobSumService.Add(layer.DiffID(), dgst) + } + } + _, err = ls.Release(layer) + if err != nil { + return err + } + + mappings[id] = strongID + return +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/migrate/v1/migratev1_test.go b/migrate/v1/migratev1_test.go new file mode 100644 index 0000000000..48d2a23411 --- /dev/null +++ b/migrate/v1/migratev1_test.go @@ -0,0 +1,389 @@ +package v1 + +import ( + "crypto/rand" + "encoding/hex" + 
"encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" +) + +func TestMigrateTags(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-tags") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108","sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"},"registry":{"2":"5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d","latest":"8d5547a9f329b1d3f93198cd661fb5117e5a96b721c5cf9a2c389e7dd4877128"}}}`), 0600) + + ta := &mockTagAdder{} + err = migrateTags(tmpdir, "generic", ta, map[string]image.ID{ + "5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d": image.ID("sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), + "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), + "abcdef3434c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:56434342345ae68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), + }) + if err != nil { + t.Fatal(err) + } + + expected := map[string]string{ + "busybox:latest": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + "busybox@sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + "registry:2": "sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", + } + + if !reflect.DeepEqual(expected, ta.refs) { + 
t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) + } + + // second migration is no-op + ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"`), 0600) + err = migrateTags(tmpdir, "generic", ta, map[string]image.ID{ + "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), + }) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, ta.refs) { + t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) + } +} + +func TestMigrateContainers(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-containers") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = addContainer(tmpdir, `{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker
/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) + if err != nil { + t.Fatal(err) + } + + // container with invalid image + err = addContainer(tmpdir, `{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"e780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"4c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aa
b8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) + if err != nil { + t.Fatal(err) + } + + ls := &mockMounter{} + + ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) + if err != nil { + t.Fatal(err) + } + + is, err := image.NewImageStore(ifs, ls) + if err != nil { + t.Fatal(err) + } + + imgID, err := is.Create([]byte(`{"architecture":"amd64","config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["sh"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","history":[{"created":"2015-10-31T22:22:54.690851953Z","created_by":"/bin/sh -c #(nop) ADD 
file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"},{"created":"2015-10-31T22:22:55.613815829Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1","sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"]}}`)) + if err != nil { + t.Fatal(err) + } + + err = migrateContainers(tmpdir, ls, is, map[string]image.ID{ + "2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093": imgID, + }) + if err != nil { + t.Fatal(err) + } + + expected := []mountInfo{{ + "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", + "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", + "sha256:c3191d32a37d7159b2e30830937d2e30268ad6c375a773a8994911a3aba9b93f", + }} + if !reflect.DeepEqual(expected, ls.mounts) { + t.Fatalf("invalid mounts: expected %q, got %q", expected, ls.mounts) + } + + if actual, expected := ls.count, 0; actual != expected { + t.Fatalf("invalid active mounts: expected %d, got %d", expected, actual) + } + + config2, err := ioutil.ReadFile(filepath.Join(tmpdir, "containers", "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", "config.v2.json")) + if err != nil { + t.Fatal(err) + } + var config struct{ Image string } + err = json.Unmarshal(config2, &config) + if err != nil { + t.Fatal(err) + } + + if actual, expected := config.Image, string(imgID); actual != expected { + t.Fatalf("invalid image pointer in migrated config: expected %q, got %q", expected, actual) + } + +} + +func TestMigrateImages(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-images") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // busybox from 1.9 + id1, err := addImage(tmpdir, 
`{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:54.690851953Z","docker_version":"1.8.2","layer_id":"sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57","os":"linux"}`, "", "") + if err != nil { + t.Fatal(err) + } + + id2, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD 
[\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","layer_id":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4","os":"linux","parent_id":"sha256:039b63dd2cbaa10d6015ea574392530571ed8d7b174090f032211285a71881d0"}`, id1, "") + if err != nil { + t.Fatal(err) + } + + ls := &mockRegistrar{} + + ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) + if err != nil { + t.Fatal(err) + } + + is, err := image.NewImageStore(ifs, ls) + if err != nil { + t.Fatal(err) + } + + ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution")) + if err != nil { + t.Fatal(err) + } + mappings := make(map[string]image.ID) + + err = migrateImages(tmpdir, ls, is, ms, mappings) + if err != nil { + t.Fatal(err) + } + + expected := map[string]image.ID{ + id1: image.ID("sha256:ca406eaf9c26898414ff5b7b3a023c33310759d6203be0663dbf1b3a712f432d"), + id2: image.ID("sha256:a488bec94bb96b26a968f913d25ef7d8d204d727ca328b52b4b059c7d03260b6"), + } + + if !reflect.DeepEqual(mappings, expected) { + t.Fatalf("invalid image mappings: expected %q, got %q", expected, mappings) + } + + if actual, expected := ls.count, 2; actual != expected { + t.Fatalf("invalid register count: expected %q, got %q", expected, actual) + } + ls.count = 0 + + // next images are busybox from 1.8.2 + _, err = addImage(tmpdir, 
`{"id":"17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2","parent":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:55.613815829Z","container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":0}`, "", "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + if err != nil { + t.Fatal(err) + } + + _, err = addImage(tmpdir, `{"id":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:54.690851953Z","container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in 
/"],"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":1108935}`, "", "sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57") + if err != nil { + t.Fatal(err) + } + + err = migrateImages(tmpdir, ls, is, ms, mappings) + if err != nil { + t.Fatal(err) + } + + expected["d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498"] = image.ID("sha256:c091bb33854e57e6902b74c08719856d30b5593c7db6143b2b48376b8a588395") + expected["17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2"] = image.ID("sha256:d963020e755ff2715b936065949472c1f8a6300144b922992a1a421999e71f07") + + if actual, expected := ls.count, 2; actual != expected { + t.Fatalf("invalid register count: expected %q, got %q", expected, actual) + } + + blobSumService := metadata.NewBlobSumService(ms) + blobsums, err := blobSumService.GetBlobSums(layer.EmptyLayer.DiffID()) + if err != nil { + t.Fatal(err) + } + + expectedBlobsums := []digest.Digest{ + "sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57", + "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", + } + + if !reflect.DeepEqual(expectedBlobsums, blobsums) { + t.Fatalf("invalid blobsums: expected %q, got %q", expectedBlobsums, blobsums) + } + +} + +func TestMigrateUnsupported(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-empty") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = Migrate(tmpdir, 
"generic", nil, nil, nil, nil) + if err != errUnsupported { + t.Fatalf("expected unsupported error, got %q", err) + } +} + +func addImage(dest, jsonConfig, parent, checksum string) (string, error) { + var config struct{ ID string } + if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { + return "", err + } + if config.ID == "" { + b := make([]byte, 32) + rand.Read(b) + config.ID = hex.EncodeToString(b) + } + contDir := filepath.Join(dest, "graph", config.ID) + if err := os.MkdirAll(contDir, 0700); err != nil { + return "", err + } + if err := ioutil.WriteFile(filepath.Join(contDir, "json"), []byte(jsonConfig), 0600); err != nil { + return "", err + } + if parent != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "parent"), []byte(parent), 0600); err != nil { + return "", err + } + } + if checksum != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { + return "", err + } + } + return config.ID, nil +} + +func addContainer(dest, jsonConfig string) error { + var config struct{ ID string } + if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { + return err + } + contDir := filepath.Join(dest, "containers", config.ID) + if err := os.MkdirAll(contDir, 0700); err != nil { + return err + } + if err := ioutil.WriteFile(filepath.Join(contDir, "config.json"), []byte(jsonConfig), 0600); err != nil { + return err + } + return nil +} + +type mockTagAdder struct { + refs map[string]string +} + +func (t *mockTagAdder) Add(ref reference.Named, id image.ID, force bool) error { + if t.refs == nil { + t.refs = make(map[string]string) + } + t.refs[ref.String()] = id.String() + return nil +} + +type mockRegistrar struct { + layers map[layer.ChainID]*mockLayer + count int +} + +func (r *mockRegistrar) RegisterByGraphID(graphID string, parent layer.ChainID, tarDataFile string) (layer.Layer, error) { + r.count++ + l := &mockLayer{} + if parent != "" { + p, exists := r.layers[parent] + if !exists 
{ + return nil, fmt.Errorf("invalid parent %q", parent) + } + l.parent = p + l.diffIDs = append(l.diffIDs, p.diffIDs...) + } + l.diffIDs = append(l.diffIDs, layer.EmptyLayer.DiffID()) + if r.layers == nil { + r.layers = make(map[layer.ChainID]*mockLayer) + } + r.layers[l.ChainID()] = l + return l, nil +} +func (r *mockRegistrar) Release(l layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} +func (r *mockRegistrar) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +type mountInfo struct { + name, graphID, parent string +} +type mockMounter struct { + mounts []mountInfo + count int +} + +func (r *mockMounter) MountByGraphID(name string, graphID string, parent layer.ChainID) (layer.RWLayer, error) { + r.mounts = append(r.mounts, mountInfo{name, graphID, string(parent)}) + r.count++ + return nil, nil +} +func (r *mockMounter) Unmount(string) error { + r.count-- + return nil +} +func (r *mockMounter) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +func (r *mockMounter) Release(layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} + +type mockLayer struct { + diffIDs []layer.DiffID + parent *mockLayer +} + +func (l *mockLayer) TarStream() (io.Reader, error) { + return nil, nil +} + +func (l *mockLayer) ChainID() layer.ChainID { + return layer.CreateChainID(l.diffIDs) +} + +func (l *mockLayer) DiffID() layer.DiffID { + return l.diffIDs[len(l.diffIDs)-1] +} + +func (l *mockLayer) Parent() layer.Layer { + if l.parent == nil { + return nil + } + return l.parent +} + +func (l *mockLayer) Size() (int64, error) { + return 0, nil +} + +func (l *mockLayer) DiffSize() (int64, error) { + return 0, nil +} + +func (l *mockLayer) Metadata() (map[string]string, error) { + return nil, nil +} From 4352da7803d182a6013a5238ce20a7c749db979a Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Wed, 18 Nov 2015 14:20:54 -0800 Subject: [PATCH 6/7] Update daemon and docker core to use new content addressable storage Add distribution package for 
managing pulls and pushes. This is based on the old code in the graph package, with major changes to work with the new image/layer model. Add v1 migration code. Update registry, api/*, and daemon packages to use the reference package's types where applicable. Update daemon package to use image/layer/tag stores instead of the graph package Signed-off-by: Aaron Lehmann Signed-off-by: Tonis Tiigi --- api/client/build.go | 57 +- api/client/commit.go | 28 +- api/client/create.go | 69 ++- api/client/images.go | 34 +- api/client/import.go | 9 +- api/client/ps/custom.go | 4 +- api/client/pull.go | 43 +- api/client/push.go | 20 +- api/client/search.go | 6 +- api/client/tag.go | 31 +- api/client/trust.go | 35 +- api/server/router/local/image.go | 145 +++-- builder/builder.go | 2 +- builder/dockerfile/builder.go | 4 +- builder/dockerfile/internals.go | 10 +- daemon/commit.go | 106 +++- daemon/container.go | 11 +- daemon/container_unit_test.go | 2 +- daemon/container_unix.go | 11 +- daemon/container_windows.go | 26 +- daemon/create.go | 21 +- daemon/daemon.go | 501 ++++++++++++------ daemon/daemon_unix.go | 12 +- daemon/daemon_windows.go | 81 ++- daemon/daemonbuilder/builder.go | 42 +- daemon/delete.go | 11 +- daemon/errors.go | 24 +- daemon/events/filter.go | 9 +- daemon/graphdriver/fsdiff.go | 10 +- daemon/graphdriver/imagerestorer.go | 31 -- daemon/graphdriver/plugin.go | 1 - daemon/graphdriver/proxy.go | 1 - daemon/graphdriver/vfs/driver.go | 9 +- daemon/graphdriver/vfs/driver_unsupported.go | 3 - daemon/graphdriver/windows/windows.go | 196 ++++--- daemon/image_delete.go | 157 +++--- daemon/images.go | 163 ++++++ daemon/import.go | 111 ++++ daemon/info.go | 2 +- daemon/inspect.go | 16 +- daemon/list.go | 54 +- errors/daemon.go | 9 - integration-cli/docker_api_containers_test.go | 4 +- integration-cli/docker_cli_build_test.go | 4 +- integration-cli/docker_cli_by_digest_test.go | 10 +- integration-cli/docker_cli_create_test.go | 36 ++ 
...cker_cli_external_graphdriver_unix_test.go | 3 +- integration-cli/docker_cli_images_test.go | 10 +- integration-cli/docker_cli_inspect_test.go | 7 +- integration-cli/docker_cli_pull_local_test.go | 68 +++ integration-cli/docker_cli_pull_test.go | 271 +--------- integration-cli/docker_cli_push_test.go | 43 -- integration-cli/docker_cli_rmi_test.go | 3 +- integration-cli/docker_cli_run_test.go | 12 + integration-cli/docker_cli_save_load_test.go | 32 +- .../docker_cli_save_load_unix_test.go | 8 +- integration-cli/docker_cli_tag_test.go | 58 +- pkg/parsers/parsers.go | 20 - pkg/parsers/parsers_test.go | 30 -- pkg/stringid/stringid.go | 4 + registry/config.go | 163 +++--- registry/registry_mock_test.go | 31 +- registry/registry_test.go | 219 +++++--- registry/service.go | 56 +- registry/service_v1.go | 12 +- registry/service_v2.go | 12 +- registry/session.go | 29 +- registry/types.go | 10 +- utils/utils.go | 17 - utils/utils_test.go | 30 -- 70 files changed, 2037 insertions(+), 1282 deletions(-) delete mode 100644 daemon/graphdriver/imagerestorer.go delete mode 100644 daemon/graphdriver/vfs/driver_unsupported.go create mode 100644 daemon/images.go create mode 100644 daemon/import.go diff --git a/api/client/build.go b/api/client/build.go index d4e2d514e4..9b48292187 100644 --- a/api/client/build.go +++ b/api/client/build.go @@ -18,16 +18,15 @@ import ( "strconv" "strings" + "github.com/docker/distribution/reference" "github.com/docker/docker/api" Cli "github.com/docker/docker/cli" - "github.com/docker/docker/graph/tags" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/jsonmessage" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/ulimit" @@ -35,6 +34,7 @@ import ( 
"github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" + tagpkg "github.com/docker/docker/tag" "github.com/docker/docker/utils" ) @@ -323,7 +323,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { // Since the build was successful, now we must tag any of the resolved // images from the above Dockerfile rewrite. for _, resolved := range resolvedTags { - if err := cli.tagTrusted(resolved.repoInfo, resolved.digestRef, resolved.tagRef); err != nil { + if err := cli.tagTrusted(resolved.digestRef, resolved.tagRef); err != nil { return err } } @@ -333,16 +333,12 @@ func (cli *DockerCli) CmdBuild(args ...string) error { // validateTag checks if the given image name can be resolved. func validateTag(rawRepo string) (string, error) { - repository, tag := parsers.ParseRepositoryTag(rawRepo) - if err := registry.ValidateRepositoryName(repository); err != nil { + ref, err := reference.ParseNamed(rawRepo) + if err != nil { return "", err } - if len(tag) == 0 { - return rawRepo, nil - } - - if err := tags.ValidateTagName(tag); err != nil { + if err := registry.ValidateRepositoryName(ref); err != nil { return "", err } @@ -565,15 +561,16 @@ func (td *trustedDockerfile) Close() error { // resolvedTag records the repository, tag, and resolved digest reference // from a Dockerfile rewrite. type resolvedTag struct { - repoInfo *registry.RepositoryInfo - digestRef, tagRef registry.Reference + repoInfo *registry.RepositoryInfo + digestRef reference.Canonical + tagRef reference.NamedTagged } // rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in // "FROM " instructions to a digest reference. `translator` is a // function that takes a repository name and tag reference and returns a // trusted digest reference. 
-func rewriteDockerfileFrom(dockerfileName string, translator func(string, registry.Reference) (registry.Reference, error)) (newDockerfile *trustedDockerfile, resolvedTags []*resolvedTag, err error) { +func rewriteDockerfileFrom(dockerfileName string, translator func(reference.NamedTagged) (reference.Canonical, error)) (newDockerfile *trustedDockerfile, resolvedTags []*resolvedTag, err error) { dockerfile, err := os.Open(dockerfileName) if err != nil { return nil, nil, fmt.Errorf("unable to open Dockerfile: %v", err) @@ -607,29 +604,39 @@ func rewriteDockerfileFrom(dockerfileName string, translator func(string, regist matches := dockerfileFromLinePattern.FindStringSubmatch(line) if matches != nil && matches[1] != "scratch" { // Replace the line with a resolved "FROM repo@digest" - repo, tag := parsers.ParseRepositoryTag(matches[1]) - if tag == "" { - tag = tags.DefaultTag - } - - repoInfo, err := registry.ParseRepositoryInfo(repo) + ref, err := reference.ParseNamed(matches[1]) if err != nil { - return nil, nil, fmt.Errorf("unable to parse repository info %q: %v", repo, err) + return nil, nil, err } - ref := registry.ParseReference(tag) + digested := false + switch ref.(type) { + case reference.Tagged: + case reference.Digested: + digested = true + default: + ref, err = reference.WithTag(ref, tagpkg.DefaultTag) + if err != nil { + return nil, nil, err + } + } - if !ref.HasDigest() && isTrusted() { - trustedRef, err := translator(repo, ref) + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return nil, nil, fmt.Errorf("unable to parse repository info %q: %v", ref.String(), err) + } + + if !digested && isTrusted() { + trustedRef, err := translator(ref.(reference.NamedTagged)) if err != nil { return nil, nil, err } - line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.ImageName(repo))) + line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.String())) 
resolvedTags = append(resolvedTags, &resolvedTag{ repoInfo: repoInfo, digestRef: trustedRef, - tagRef: ref, + tagRef: ref.(reference.NamedTagged), }) } } diff --git a/api/client/commit.go b/api/client/commit.go index 451813e376..36fcb3c77e 100644 --- a/api/client/commit.go +++ b/api/client/commit.go @@ -2,14 +2,15 @@ package client import ( "encoding/json" + "errors" "fmt" "net/url" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" Cli "github.com/docker/docker/cli" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" ) @@ -32,20 +33,35 @@ func (cli *DockerCli) CmdCommit(args ...string) error { cmd.ParseFlags(args, true) var ( - name = cmd.Arg(0) - repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) + name = cmd.Arg(0) + repositoryAndTag = cmd.Arg(1) + repositoryName string + tag string ) //Check if the given image name can be resolved - if repository != "" { - if err := registry.ValidateRepositoryName(repository); err != nil { + if repositoryAndTag != "" { + ref, err := reference.ParseNamed(repositoryAndTag) + if err != nil { return err } + if err := registry.ValidateRepositoryName(ref); err != nil { + return err + } + + repositoryName = ref.Name() + + switch x := ref.(type) { + case reference.Digested: + return errors.New("cannot commit to digest reference") + case reference.Tagged: + tag = x.Tag() + } } v := url.Values{} v.Set("container", name) - v.Set("repo", repository) + v.Set("repo", repositoryName) v.Set("tag", tag) v.Set("comment", *flComment) v.Set("author", *flAuthor) diff --git a/api/client/create.go b/api/client/create.go index 9ef0edab6c..9443799e55 100644 --- a/api/client/create.go +++ b/api/client/create.go @@ -9,12 +9,12 @@ import ( "os" "strings" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" Cli "github.com/docker/docker/cli" - 
"github.com/docker/docker/graph/tags" - "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" + tagpkg "github.com/docker/docker/tag" ) func (cli *DockerCli) pullImage(image string) error { @@ -23,16 +23,28 @@ func (cli *DockerCli) pullImage(image string) error { func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { v := url.Values{} - repos, tag := parsers.ParseRepositoryTag(image) - // pull only the image tagged 'latest' if no tag was specified - if tag == "" { - tag = tags.DefaultTag + + ref, err := reference.ParseNamed(image) + if err != nil { + return err } - v.Set("fromImage", repos) + + var tag string + switch x := ref.(type) { + case reference.Digested: + tag = x.Digest().String() + case reference.Tagged: + tag = x.Tag() + default: + // pull only the image tagged 'latest' if no tag was specified + tag = tagpkg.DefaultTag + } + + v.Set("fromImage", ref.Name()) v.Set("tag", tag) // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(repos) + repoInfo, err := registry.ParseRepositoryInfo(ref) if err != nil { return err } @@ -94,39 +106,46 @@ func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runc defer containerIDFile.Close() } - repo, tag := parsers.ParseRepositoryTag(config.Image) - if tag == "" { - tag = tags.DefaultTag + ref, err := reference.ParseNamed(config.Image) + if err != nil { + return nil, err } - ref := registry.ParseReference(tag) - var trustedRef registry.Reference - - if isTrusted() && !ref.HasDigest() { - var err error - trustedRef, err = cli.trustedReference(repo, ref) + isDigested := false + switch ref.(type) { + case reference.Tagged: + case reference.Digested: + isDigested = true + default: + ref, err = reference.WithTag(ref, tagpkg.DefaultTag) if err != nil { return nil, err } - config.Image = trustedRef.ImageName(repo) + } + + var trustedRef reference.Canonical + + if isTrusted() 
&& !isDigested { + var err error + trustedRef, err = cli.trustedReference(ref.(reference.NamedTagged)) + if err != nil { + return nil, err + } + config.Image = trustedRef.String() } //create the container serverResp, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, nil) //if image not found try to pull it if serverResp.statusCode == 404 && strings.Contains(err.Error(), config.Image) { - fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", ref.ImageName(repo)) + fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", ref.String()) // we don't want to write to stdout anything apart from container.ID if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { return nil, err } - if trustedRef != nil && !ref.HasDigest() { - repoInfo, err := registry.ParseRepositoryInfo(repo) - if err != nil { - return nil, err - } - if err := cli.tagTrusted(repoInfo, trustedRef, ref); err != nil { + if trustedRef != nil && !isDigested { + if err := cli.tagTrusted(trustedRef, ref.(reference.NamedTagged)); err != nil { return nil, err } } diff --git a/api/client/images.go b/api/client/images.go index d47deb62d3..ba26512a2d 100644 --- a/api/client/images.go +++ b/api/client/images.go @@ -4,18 +4,18 @@ import ( "encoding/json" "fmt" "net/url" + "strings" "text/tabwriter" "time" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" Cli "github.com/docker/docker/cli" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/filters" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/units" - "github.com/docker/docker/utils" ) // CmdImages lists the images in a specified repository, or all top-level images if no repository is specified. 
@@ -78,9 +78,9 @@ func (cli *DockerCli) CmdImages(args ...string) error { w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) if !*quiet { if *showDigests { - fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tVIRTUAL SIZE") + fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tSIZE") } else { - fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") + fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tSIZE") } } @@ -101,21 +101,31 @@ func (cli *DockerCli) CmdImages(args ...string) error { // combine the tags and digests lists tagsAndDigests := append(repoTags, repoDigests...) for _, repoAndRef := range tagsAndDigests { - repo, ref := parsers.ParseRepositoryTag(repoAndRef) - // default tag and digest to none - if there's a value, it'll be set below + // default repo, tag, and digest to none - if there's a value, it'll be set below + repo := "" tag := "" digest := "" - if utils.DigestReference(ref) { - digest = ref - } else { - tag = ref + + if !strings.HasPrefix(repoAndRef, "") { + ref, err := reference.ParseNamed(repoAndRef) + if err != nil { + return err + } + repo = ref.Name() + + switch x := ref.(type) { + case reference.Digested: + digest = x.Digest().String() + case reference.Tagged: + tag = x.Tag() + } } if !*quiet { if *showDigests { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize))) + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.Size))) } else { - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize))) + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, ID, 
units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.Size))) } } else { fmt.Fprintln(w, ID) diff --git a/api/client/import.go b/api/client/import.go index c64e88d3c1..2838debd98 100644 --- a/api/client/import.go +++ b/api/client/import.go @@ -6,10 +6,10 @@ import ( "net/url" "os" + "github.com/docker/distribution/reference" Cli "github.com/docker/docker/cli" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/registry" ) @@ -47,8 +47,11 @@ func (cli *DockerCli) CmdImport(args ...string) error { if repository != "" { //Check if the given image name can be resolved - repo, _ := parsers.ParseRepositoryTag(repository) - if err := registry.ValidateRepositoryName(repo); err != nil { + ref, err := reference.ParseNamed(repository) + if err != nil { + return err + } + if err := registry.ValidateRepositoryName(ref); err != nil { return err } } diff --git a/api/client/ps/custom.go b/api/client/ps/custom.go index 1739fd12a6..77e460892b 100644 --- a/api/client/ps/custom.go +++ b/api/client/ps/custom.go @@ -62,8 +62,8 @@ func (c *containerContext) Image() string { return "" } if c.trunc { - if stringid.TruncateID(c.c.ImageID) == stringid.TruncateID(c.c.Image) { - return stringutils.Truncate(c.c.Image, 12) + if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { + return trunc } } return c.c.Image diff --git a/api/client/pull.go b/api/client/pull.go index dcb2ecb0c7..e585a12081 100644 --- a/api/client/pull.go +++ b/api/client/pull.go @@ -1,16 +1,19 @@ package client import ( + "errors" "fmt" "net/url" + "github.com/docker/distribution/reference" Cli "github.com/docker/docker/cli" - "github.com/docker/docker/graph/tags" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/registry" + tagpkg 
"github.com/docker/docker/tag" ) +var errTagCantBeUsed = errors.New("tag can't be used with --all-tags/-a") + // CmdPull pulls an image or a repository from the registry. // // Usage: docker pull [OPTIONS] IMAGENAME[:TAG|@DIGEST] @@ -23,18 +26,38 @@ func (cli *DockerCli) CmdPull(args ...string) error { cmd.ParseFlags(args, true) remote := cmd.Arg(0) - taglessRemote, tag := parsers.ParseRepositoryTag(remote) - if tag == "" && !*allTags { - tag = tags.DefaultTag - fmt.Fprintf(cli.out, "Using default tag: %s\n", tag) - } else if tag != "" && *allTags { - return fmt.Errorf("tag can't be used with --all-tags/-a") + distributionRef, err := reference.ParseNamed(remote) + if err != nil { + return err + } + + var tag string + switch x := distributionRef.(type) { + case reference.Digested: + if *allTags { + return errTagCantBeUsed + } + tag = x.Digest().String() + case reference.Tagged: + if *allTags { + return errTagCantBeUsed + } + tag = x.Tag() + default: + if !*allTags { + tag = tagpkg.DefaultTag + distributionRef, err = reference.WithTag(distributionRef, tag) + if err != nil { + return err + } + fmt.Fprintf(cli.out, "Using default tag: %s\n", tag) + } } ref := registry.ParseReference(tag) // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(taglessRemote) + repoInfo, err := registry.ParseRepositoryInfo(distributionRef) if err != nil { return err } @@ -46,7 +69,7 @@ func (cli *DockerCli) CmdPull(args ...string) error { } v := url.Values{} - v.Set("fromImage", ref.ImageName(taglessRemote)) + v.Set("fromImage", distributionRef.String()) _, _, err = cli.clientRequestAttemptLogin("POST", "/images/create?"+v.Encode(), nil, cli.out, repoInfo.Index, "pull") return err diff --git a/api/client/push.go b/api/client/push.go index 0c8823f051..760c97ef8b 100644 --- a/api/client/push.go +++ b/api/client/push.go @@ -1,12 +1,13 @@ package client import ( + "errors" "fmt" "net/url" + "github.com/docker/distribution/reference" Cli 
"github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/registry" ) @@ -20,10 +21,21 @@ func (cli *DockerCli) CmdPush(args ...string) error { cmd.ParseFlags(args, true) - remote, tag := parsers.ParseRepositoryTag(cmd.Arg(0)) + ref, err := reference.ParseNamed(cmd.Arg(0)) + if err != nil { + return err + } + + var tag string + switch x := ref.(type) { + case reference.Digested: + return errors.New("cannot push a digest reference") + case reference.Tagged: + tag = x.Tag() + } // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(remote) + repoInfo, err := registry.ParseRepositoryInfo(ref) if err != nil { return err } @@ -48,6 +60,6 @@ func (cli *DockerCli) CmdPush(args ...string) error { v := url.Values{} v.Set("tag", tag) - _, _, err = cli.clientRequestAttemptLogin("POST", "/images/"+remote+"/push?"+v.Encode(), nil, cli.out, repoInfo.Index, "push") + _, _, err = cli.clientRequestAttemptLogin("POST", "/images/"+ref.Name()+"/push?"+v.Encode(), nil, cli.out, repoInfo.Index, "push") return err } diff --git a/api/client/search.go b/api/client/search.go index f02d5fe8b0..1a47064477 100644 --- a/api/client/search.go +++ b/api/client/search.go @@ -10,7 +10,6 @@ import ( Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/registry" ) @@ -38,10 +37,7 @@ func (cli *DockerCli) CmdSearch(args ...string) error { v := url.Values{} v.Set("term", name) - // Resolve the Repository name from fqn to hostname + name - taglessRemote, _ := parsers.ParseRepositoryTag(name) - - indexInfo, err := registry.ParseIndexInfo(taglessRemote) + indexInfo, err := registry.ParseSearchIndexInfo(name) if err != nil { return err } diff --git a/api/client/tag.go b/api/client/tag.go index 505e0208cd..99a5a43bbf 100644 --- 
a/api/client/tag.go +++ b/api/client/tag.go @@ -1,11 +1,12 @@ package client import ( + "errors" "net/url" + "github.com/docker/distribution/reference" Cli "github.com/docker/docker/cli" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/registry" ) @@ -19,16 +20,28 @@ func (cli *DockerCli) CmdTag(args ...string) error { cmd.ParseFlags(args, true) - var ( - repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) - v = url.Values{} - ) - - //Check if the given image name can be resolved - if err := registry.ValidateRepositoryName(repository); err != nil { + v := url.Values{} + ref, err := reference.ParseNamed(cmd.Arg(1)) + if err != nil { return err } - v.Set("repo", repository) + + _, isDigested := ref.(reference.Digested) + if isDigested { + return errors.New("refusing to create a tag with a digest reference") + } + + tag := "" + tagged, isTagged := ref.(reference.Tagged) + if isTagged { + tag = tagged.Tag() + } + + //Check if the given image name can be resolved + if err := registry.ValidateRepositoryName(ref); err != nil { + return err + } + v.Set("repo", ref.Name()) v.Set("tag", tag) if *force { diff --git a/api/client/trust.go b/api/client/trust.go index 00dbf6de62..da6c3e5768 100644 --- a/api/client/trust.go +++ b/api/client/trust.go @@ -19,6 +19,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/cliconfig" @@ -163,12 +164,12 @@ func (cli *DockerCli) getNotaryRepository(repoInfo *registry.RepositoryInfo, aut } creds := simpleCredentialStore{auth: authConfig} - tokenHandler := auth.NewTokenHandler(authTransport, creds, repoInfo.CanonicalName, "push", "pull") + tokenHandler := auth.NewTokenHandler(authTransport, creds, repoInfo.CanonicalName.Name(), "push", "pull") basicHandler := 
auth.NewBasicHandler(creds) modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))) tr := transport.NewTransport(base, modifiers...) - return client.NewNotaryRepository(cli.trustDirectory(), repoInfo.CanonicalName, server, tr, cli.getPassphraseRetriever()) + return client.NewNotaryRepository(cli.trustDirectory(), repoInfo.CanonicalName.Name(), server, tr, cli.getPassphraseRetriever()) } func convertTarget(t client.Target) (target, error) { @@ -219,8 +220,8 @@ func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever { } } -func (cli *DockerCli) trustedReference(repo string, ref registry.Reference) (registry.Reference, error) { - repoInfo, err := registry.ParseRepositoryInfo(repo) +func (cli *DockerCli) trustedReference(ref reference.NamedTagged) (reference.Canonical, error) { + repoInfo, err := registry.ParseRepositoryInfo(ref) if err != nil { return nil, err } @@ -234,7 +235,7 @@ func (cli *DockerCli) trustedReference(repo string, ref registry.Reference) (reg return nil, err } - t, err := notaryRepo.GetTargetByName(ref.String()) + t, err := notaryRepo.GetTargetByName(ref.Tag()) if err != nil { return nil, err } @@ -244,18 +245,17 @@ func (cli *DockerCli) trustedReference(repo string, ref registry.Reference) (reg } - return registry.DigestReference(r.digest), nil + return reference.WithDigest(ref, r.digest) } -func (cli *DockerCli) tagTrusted(repoInfo *registry.RepositoryInfo, trustedRef, ref registry.Reference) error { - fullName := trustedRef.ImageName(repoInfo.LocalName) - fmt.Fprintf(cli.out, "Tagging %s as %s\n", fullName, ref.ImageName(repoInfo.LocalName)) +func (cli *DockerCli) tagTrusted(trustedRef reference.Canonical, ref reference.NamedTagged) error { + fmt.Fprintf(cli.out, "Tagging %s as %s\n", trustedRef.String(), ref.String()) tv := url.Values{} - tv.Set("repo", repoInfo.LocalName) - tv.Set("tag", ref.String()) + tv.Set("repo", trustedRef.Name()) + tv.Set("tag", ref.Tag()) 
tv.Set("force", "1") - if _, _, err := readBody(cli.call("POST", "/images/"+fullName+"/tag?"+tv.Encode(), nil, nil)); err != nil { + if _, _, err := readBody(cli.call("POST", "/images/"+trustedRef.String()+"/tag?"+tv.Encode(), nil, nil)); err != nil { return err } @@ -317,7 +317,7 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr refs = append(refs, r) } - v.Set("fromImage", repoInfo.LocalName) + v.Set("fromImage", repoInfo.LocalName.Name()) for i, r := range refs { displayTag := r.reference.String() if displayTag != "" { @@ -333,7 +333,12 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr // If reference is not trusted, tag by trusted reference if !r.reference.HasDigest() { - if err := cli.tagTrusted(repoInfo, registry.DigestReference(r.digest), r.reference); err != nil { + tagged, err := reference.WithTag(repoInfo.LocalName, r.reference.String()) + if err != nil { + return err + } + trustedRef, err := reference.WithDigest(repoInfo.LocalName, r.digest) + if err := cli.tagTrusted(trustedRef, tagged); err != nil { return err } @@ -386,7 +391,7 @@ func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string, v := url.Values{} v.Set("tag", tag) - _, _, err := cli.clientRequestAttemptLogin("POST", "/images/"+repoInfo.LocalName+"/push?"+v.Encode(), nil, streamOut, repoInfo.Index, "push") + _, _, err := cli.clientRequestAttemptLogin("POST", "/images/"+repoInfo.LocalName.Name()+"/push?"+v.Encode(), nil, streamOut, repoInfo.Index, "push") // Close stream channel to finish target parsing if err := streamOut.Close(); err != nil { return err diff --git a/api/server/router/local/image.go b/api/server/router/local/image.go index 7e41eac894..77f58f6b66 100644 --- a/api/server/router/local/image.go +++ b/api/server/router/local/image.go @@ -10,6 +10,8 @@ import ( "strings" "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" 
"github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/types" "github.com/docker/docker/builder" @@ -17,17 +19,14 @@ import ( "github.com/docker/docker/cliconfig" "github.com/docker/docker/daemon/daemonbuilder" derr "github.com/docker/docker/errors" - "github.com/docker/docker/graph" - "github.com/docker/docker/graph/tags" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/ulimit" - "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" + tagpkg "github.com/docker/docker/tag" "github.com/docker/docker/utils" "golang.org/x/net/context" ) @@ -110,26 +109,55 @@ func (s *router) postImagesCreate(ctx context.Context, w http.ResponseWriter, r w.Header().Set("Content-Type", "application/json") if image != "" { //pull - if tag == "" { - image, tag = parsers.ParseRepositoryTag(image) - } - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v + // Special case: "pull -a" may send an image name with a + // trailing :. This is ugly, but let's not break API + // compatibility. + image = strings.TrimSuffix(image, ":") + + var ref reference.Named + ref, err = reference.ParseNamed(image) + if err == nil { + if tag != "" { + // The "tag" could actually be a digest. 
+ var dgst digest.Digest + dgst, err = digest.ParseDigest(tag) + if err == nil { + ref, err = reference.WithDigest(ref, dgst) + } else { + ref, err = reference.WithTag(ref, tag) + } + } + if err == nil { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + err = s.daemon.PullImage(ref, metaHeaders, authConfig, output) } } - - imagePullConfig := &graph.ImagePullConfig{ - MetaHeaders: metaHeaders, - AuthConfig: authConfig, - OutStream: output, - } - - err = s.daemon.PullImage(image, tag, imagePullConfig) } else { //import - if tag == "" { - repo, tag = parsers.ParseRepositoryTag(repo) + var newRef reference.Named + if repo != "" { + var err error + newRef, err = reference.ParseNamed(repo) + if err != nil { + return err + } + + switch newRef.(type) { + case reference.Digested: + return errors.New("cannot import digest reference") + } + + if tag != "" { + newRef, err = reference.WithTag(newRef, tag) + if err != nil { + return err + } + } } src := r.Form.Get("fromSrc") @@ -143,7 +171,7 @@ func (s *router) postImagesCreate(ctx context.Context, w http.ResponseWriter, r return err } - err = s.daemon.ImportImage(src, repo, tag, message, r.Body, output, newConfig) + err = s.daemon.ImportImage(src, newRef, message, r.Body, output, newConfig) } if err != nil { if !output.Flushed() { @@ -183,19 +211,25 @@ func (s *router) postImagesPush(ctx context.Context, w http.ResponseWriter, r *h } } - name := vars["name"] + ref, err := reference.ParseNamed(vars["name"]) + if err != nil { + return err + } + tag := r.Form.Get("tag") + if tag != "" { + // Push by digest is not supported, so only tags are supported. 
+ ref, err = reference.WithTag(ref, tag) + if err != nil { + return err + } + } + output := ioutils.NewWriteFlusher(w) defer output.Close() - imagePushConfig := &graph.ImagePushConfig{ - MetaHeaders: metaHeaders, - AuthConfig: authConfig, - Tag: r.Form.Get("tag"), - OutStream: output, - } w.Header().Set("Content-Type", "application/json") - if err := s.daemon.PushImage(name, imagePushConfig); err != nil { + if err := s.daemon.PushImage(ref, metaHeaders, authConfig, output); err != nil { if !output.Flushed() { return err } @@ -428,7 +462,7 @@ } for _, rt := range repoAndTags { - if err := s.daemon.TagImage(rt.repo, rt.tag, string(imgID), true); err != nil { + if err := s.daemon.TagImage(rt, imgID, true); err != nil { return errf(err) } } @@ -436,43 +470,38 @@ return nil } -// repoAndTag is a helper struct for holding the parsed repositories and tags of -// the input "t" argument. -type repoAndTag struct { - repo, tag string -} - // sanitizeRepoAndTags parses the raw "t" parameter received from the client // to a slice of repoAndTag. // It also validates each repoName and tag. -func sanitizeRepoAndTags(names []string) ([]repoAndTag, error) { +func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { var ( - repoAndTags []repoAndTag + repoAndTags []reference.Named // This map is used for deduplicating the "-t" parameter. 
uniqNames = make(map[string]struct{}) ) for _, repo := range names { - name, tag := parsers.ParseRepositoryTag(repo) - if name == "" { + if repo == "" { continue } - if err := registry.ValidateRepositoryName(name); err != nil { + ref, err := reference.ParseNamed(repo) + if err != nil { return nil, err } - nameWithTag := name - if len(tag) > 0 { - if err := tags.ValidateTagName(tag); err != nil { - return nil, err - } - nameWithTag += ":" + tag - } else { - nameWithTag += ":" + tags.DefaultTag + if _, isDigested := ref.(reference.Digested); isDigested { + return nil, errors.New("build tag cannot be a digest") } + + if _, isTagged := ref.(reference.Tagged); !isTagged { + ref, err = reference.WithTag(ref, tagpkg.DefaultTag) + } + + nameWithTag := ref.String() + if _, exists := uniqNames[nameWithTag]; !exists { uniqNames[nameWithTag] = struct{}{} - repoAndTags = append(repoAndTags, repoAndTag{repo: name, tag: tag}) + repoAndTags = append(repoAndTags, ref) } } return repoAndTags, nil @@ -484,7 +513,7 @@ func (s *router) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *ht } // FIXME: The filter parameter could just be a match filter - images, err := s.daemon.ListImages(r.Form.Get("filters"), r.Form.Get("filter"), httputils.BoolValue(r, "all")) + images, err := s.daemon.Images(r.Form.Get("filters"), r.Form.Get("filter"), httputils.BoolValue(r, "all")) if err != nil { return err } @@ -508,9 +537,17 @@ func (s *router) postImagesTag(ctx context.Context, w http.ResponseWriter, r *ht } repo := r.Form.Get("repo") tag := r.Form.Get("tag") - name := vars["name"] + newTag, err := reference.WithName(repo) + if err != nil { + return err + } + if tag != "" { + if newTag, err = reference.WithTag(newTag, tag); err != nil { + return err + } + } force := httputils.BoolValue(r, "force") - if err := s.daemon.TagImage(repo, tag, name, force); err != nil { + if err := s.daemon.TagImage(newTag, vars["name"], force); err != nil { return err } w.WriteHeader(http.StatusCreated) diff 
--git a/builder/builder.go b/builder/builder.go index ac045fe41e..e5d4d63f9b 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -125,7 +125,7 @@ type Docker interface { // Remove removes a container specified by `id`. Remove(id string, cfg *daemon.ContainerRmConfig) error // Commit creates a new Docker image from an existing Docker container. - Commit(string, *daemon.ContainerCommitConfig) (*image.Image, error) + Commit(string, *daemon.ContainerCommitConfig) (string, error) // Copy copies/extracts a source FileInfo to a destination path inside a container // specified by a container object. // TODO: make an Extract method instead of passing `decompress` diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go index bd4283daf9..77820a9715 100644 --- a/builder/dockerfile/builder.go +++ b/builder/dockerfile/builder.go @@ -277,9 +277,9 @@ func Commit(containerName string, d *daemon.Daemon, c *CommitConfig) (string, er MergeConfigs: true, } - img, err := d.Commit(containerName, commitCfg) + imgID, err := d.Commit(containerName, commitCfg) if err != nil { return "", err } - return img.ID, nil + return imgID, nil } diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go index a251af4f6e..cdb86cdd17 100644 --- a/builder/dockerfile/internals.go +++ b/builder/dockerfile/internals.go @@ -83,13 +83,13 @@ func (b *Builder) commit(id string, autoCmd *stringutils.StrSlice, comment strin } // Commit the container - image, err := b.docker.Commit(id, commitCfg) + imageID, err := b.docker.Commit(id, commitCfg) if err != nil { return err } - b.docker.Retain(b.id, image.ID) - b.activeImages = append(b.activeImages, image.ID) - b.image = image.ID + b.docker.Retain(b.id, imageID) + b.activeImages = append(b.activeImages, imageID) + b.image = imageID return nil } @@ -412,7 +412,7 @@ func containsWildcards(name string) bool { } func (b *Builder) processImageFrom(img *image.Image) error { - b.image = img.ID + b.image = img.ID().String() if 
img.Config != nil { b.runConfig = img.Config diff --git a/daemon/commit.go b/daemon/commit.go index 61808eabce..01bfa3c7ae 100644 --- a/daemon/commit.go +++ b/daemon/commit.go @@ -1,10 +1,16 @@ package daemon import ( + "encoding/json" "fmt" "runtime" + "strings" + "time" + "github.com/docker/distribution/reference" + "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" + "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/runconfig" @@ -25,15 +31,15 @@ type ContainerCommitConfig struct { // Commit creates a new filesystem image from the current state of a container. // The image can optionally be tagged into a repository. -func (daemon *Daemon) Commit(name string, c *ContainerCommitConfig) (*image.Image, error) { +func (daemon *Daemon) Commit(name string, c *ContainerCommitConfig) (string, error) { container, err := daemon.Get(name) if err != nil { - return nil, err + return "", err } // It is not possible to commit a running container on Windows if runtime.GOOS == "windows" && container.IsRunning() { - return nil, fmt.Errorf("Windows does not support commit of a running container") + return "", fmt.Errorf("Windows does not support commit of a running container") } if c.Pause && !container.isPaused() { @@ -43,13 +49,13 @@ func (daemon *Daemon) Commit(name string, c *ContainerCommitConfig) (*image.Imag if c.MergeConfigs { if err := runconfig.Merge(c.Config, container.Config); err != nil { - return nil, err + return "", err } } rwTar, err := daemon.exportContainerRw(container) if err != nil { - return nil, err + return "", err } defer func() { if rwTar != nil { @@ -57,31 +63,99 @@ func (daemon *Daemon) Commit(name string, c *ContainerCommitConfig) (*image.Imag } }() - // Create a new image from the container's base layers + a new layer from container changes - img, err := daemon.graph.Create(rwTar, container.ID, container.ImageID, c.Comment, c.Author, 
container.Config, c.Config) - if err != nil { - return nil, err + var history []image.History + rootFS := image.NewRootFS() + + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return "", err + } + history = img.History + rootFS = img.RootFS + } + + l, err := daemon.layerStore.Register(rwTar, rootFS.ChainID()) + if err != nil { + return "", err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + h := image.History{ + Author: c.Author, + Created: time.Now().UTC(), + CreatedBy: strings.Join(container.Config.Cmd.Slice(), " "), + Comment: c.Comment, + EmptyLayer: true, + } + + if diffID := l.DiffID(); layer.DigestSHA256EmptyTar != diffID { + h.EmptyLayer = false + rootFS.Append(diffID) + } + + history = append(history, h) + + config, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: c.Config, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Container: container.ID, + ContainerConfig: *container.Config, + Author: c.Author, + Created: h.Created, + }, + RootFS: rootFS, + History: history, + }) + + if err != nil { + return "", err + } + + id, err := daemon.imageStore.Create(config) + if err != nil { + return "", err + } + + if container.ImageID != "" { + if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil { + return "", err + } } - // Register the image if needed if c.Repo != "" { - if err := daemon.repositories.Tag(c.Repo, c.Tag, img.ID, true); err != nil { - return img, err + newTag, err := reference.WithName(c.Repo) // todo: should move this to API layer + if err != nil { + return "", err + } + if c.Tag != "" { + if newTag, err = reference.WithTag(newTag, c.Tag); err != nil { + return "", err + } + } + if err := daemon.TagImage(newTag, id.String(), true); err != nil { + return "", err } } daemon.LogContainerEvent(container, "commit") - return img, nil + return id.String(), nil } func (daemon *Daemon) 
exportContainerRw(container *Container) (archive.Archive, error) { - archive, err := daemon.diff(container) + if err := daemon.Mount(container); err != nil { + return nil, err + } + + archive, err := container.rwlayer.TarStream() if err != nil { return nil, err } return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - return err + return daemon.layerStore.Unmount(container.ID) }), nil } diff --git a/daemon/container.go b/daemon/container.go index b15f4eebbc..e39093bb50 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -20,6 +20,8 @@ import ( "github.com/docker/docker/daemon/logger/jsonfilelog" "github.com/docker/docker/daemon/network" derr "github.com/docker/docker/errors" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" "github.com/docker/docker/pkg/nat" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/signal" @@ -29,6 +31,8 @@ import ( "github.com/docker/docker/volume" ) +const configFileName = "config.v2.json" + var ( // ErrRootFSReadOnly is returned when a container // rootfs is marked readonly. @@ -43,12 +47,13 @@ type CommonContainer struct { *State `json:"State"` // Needed for remote api version <= 1.11 root string // Path to the "home" of the container, including metadata. 
basefs string // Path to the graphdriver mountpoint + rwlayer layer.RWLayer ID string Created time.Time Path string Args []string Config *runconfig.Config - ImageID string `json:"Image"` + ImageID image.ID `json:"Image"` NetworkSettings *network.Settings LogPath string Name string @@ -256,7 +261,7 @@ func (container *Container) hostConfigPath() (string, error) { } func (container *Container) jsonPath() (string, error) { - return container.getRootResourcePath("config.json") + return container.getRootResourcePath(configFileName) } // This directory is only usable when the container is running @@ -301,7 +306,7 @@ func (container *Container) StartLogger(cfg runconfig.LogConfig) (logger.Logger, ContainerName: container.Name, ContainerEntrypoint: container.Path, ContainerArgs: container.Args, - ContainerImageID: container.ImageID, + ContainerImageID: container.ImageID.String(), ContainerImageName: container.Config.Image, ContainerCreated: container.Created, ContainerEnv: container.Config.Env, diff --git a/daemon/container_unit_test.go b/daemon/container_unit_test.go index f7951804a3..c42e7751f3 100644 --- a/daemon/container_unit_test.go +++ b/daemon/container_unit_test.go @@ -99,7 +99,7 @@ func TestContainerInitDNS(t *testing.T) { "Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, "UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}` - if err = ioutil.WriteFile(filepath.Join(containerPath, "config.json"), []byte(config), 0644); err != nil { + if err = ioutil.WriteFile(filepath.Join(containerPath, configFileName), []byte(config), 0644); err != nil { t.Fatal(err) } diff --git a/daemon/container_unix.go b/daemon/container_unix.go index a925717fe6..6d6ee0e7ad 100644 --- a/daemon/container_unix.go +++ b/daemon/container_unix.go @@ -19,7 +19,6 @@ import ( "github.com/docker/docker/daemon/links" "github.com/docker/docker/daemon/network" derr "github.com/docker/docker/errors" - 
"github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" @@ -388,8 +387,7 @@ func (daemon *Daemon) getSize(container *Container) (int64, int64) { } defer daemon.Unmount(container) - initID := fmt.Sprintf("%s-init", container.ID) - sizeRw, err = daemon.driver.DiffSize(container.ID, initID) + sizeRw, err = container.rwlayer.Size() if err != nil { logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", daemon.driver, container.ID, err) // FIXME: GetSize should return an error. Not changing it now in case @@ -397,9 +395,12 @@ func (daemon *Daemon) getSize(container *Container) (int64, int64) { sizeRw = -1 } - if _, err = os.Stat(container.basefs); err == nil { - if sizeRootfs, err = directory.Size(container.basefs); err != nil { + if parent := container.rwlayer.Parent(); parent != nil { + sizeRootfs, err = parent.Size() + if err != nil { sizeRootfs = -1 + } else if sizeRw != -1 { + sizeRootfs += sizeRw } } return sizeRw, sizeRootfs diff --git a/daemon/container_windows.go b/daemon/container_windows.go index 5400f12f45..9563f0d262 100644 --- a/daemon/container_windows.go +++ b/daemon/container_windows.go @@ -7,6 +7,7 @@ import ( "github.com/docker/docker/daemon/execdriver" derr "github.com/docker/docker/errors" + "github.com/docker/docker/layer" "github.com/docker/docker/volume" "github.com/docker/libnetwork" ) @@ -98,22 +99,25 @@ func (daemon *Daemon) populateCommand(c *Container, env []string) error { processConfig.Env = env var layerPaths []string - img, err := daemon.graph.Get(c.ImageID) + img, err := daemon.imageStore.Get(c.ImageID) if err != nil { return derr.ErrorCodeGetGraph.WithArgs(c.ImageID, err) } - for i := img; i != nil && err == nil; i, err = daemon.graph.GetParent(i) { - lp, err := daemon.driver.Get(i.ID, "") - if err != nil { - return derr.ErrorCodeGetLayer.WithArgs(daemon.driver.String(), i.ID, err) - } - layerPaths = append(layerPaths, 
lp) - err = daemon.driver.Put(i.ID) - if err != nil { - return derr.ErrorCodePutLayer.WithArgs(daemon.driver.String(), i.ID, err) + + if img.RootFS != nil && img.RootFS.Type == "layers+base" { + max := len(img.RootFS.DiffIDs) + for i := 0; i <= max; i++ { + img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] + path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID()) + if err != nil { + return derr.ErrorCodeGetLayer.WithArgs(err) + } + // Reverse order, expecting parent most first + layerPaths = append([]string{path}, layerPaths...) } } - m, err := daemon.driver.GetMetadata(c.ID) + + m, err := layer.RWLayerMetadata(daemon.layerStore, c.ID) if err != nil { return derr.ErrorCodeGetLayerMetadata.WithArgs(err) } diff --git a/daemon/create.go b/daemon/create.go index 2c26931733..3ae2e9cdeb 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -5,6 +5,7 @@ import ( "github.com/docker/docker/api/types" derr "github.com/docker/docker/errors" "github.com/docker/docker/image" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/runconfig" "github.com/docker/docker/volume" @@ -34,7 +35,7 @@ func (daemon *Daemon) ContainerCreate(params *ContainerCreateConfig) (types.Cont container, err := daemon.create(params) if err != nil { - return types.ContainerCreateResponse{ID: "", Warnings: warnings}, daemon.graphNotExistToErrcode(params.Config.Image, err) + return types.ContainerCreateResponse{ID: "", Warnings: warnings}, daemon.imageNotExistToErrcode(err) } return types.ContainerCreateResponse{ID: container.ID, Warnings: warnings}, nil @@ -45,19 +46,16 @@ func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *Container, re var ( container *Container img *image.Image - imgID string + imgID image.ID err error ) if params.Config.Image != "" { - img, err = daemon.repositories.LookupImage(params.Config.Image) + img, err = daemon.GetImage(params.Config.Image) if err != nil { return nil, err } - if err = 
daemon.graph.CheckDepth(img); err != nil { - return nil, err - } - imgID = img.ID + imgID = img.ID() } if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { @@ -87,15 +85,14 @@ func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *Container, re if err := daemon.Register(container); err != nil { return nil, err } - container.Lock() - if err := parseSecurityOpt(container, params.HostConfig); err != nil { - container.Unlock() + rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + if err != nil { return nil, err } - container.Unlock() - if err := daemon.createRootfs(container); err != nil { + if err := idtools.MkdirAs(container.root, 0700, rootUID, rootGID); err != nil { return nil, err } + if err := daemon.setHostConfig(container, params.HostConfig); err != nil { return nil, err } diff --git a/daemon/daemon.go b/daemon/daemon.go index 435c93db31..6a8d18e923 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -18,6 +18,8 @@ import ( "time" "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/docker/api" "github.com/docker/docker/api/types" "github.com/docker/docker/cliconfig" @@ -29,9 +31,13 @@ import ( _ "github.com/docker/docker/daemon/graphdriver/vfs" // register vfs "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/network" + "github.com/docker/docker/distribution" + dmetadata "github.com/docker/docker/distribution/metadata" derr "github.com/docker/docker/errors" - "github.com/docker/docker/graph" "github.com/docker/docker/image" + "github.com/docker/docker/image/tarexport" + "github.com/docker/docker/layer" + "github.com/docker/docker/migrate/v1" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/discovery" "github.com/docker/docker/pkg/fileutils" @@ -50,12 +56,14 @@ import ( "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/registry" 
"github.com/docker/docker/runconfig" + "github.com/docker/docker/tag" "github.com/docker/docker/utils" volumedrivers "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/local" "github.com/docker/docker/volume/store" "github.com/docker/libnetwork" lntypes "github.com/docker/libnetwork/types" + "github.com/docker/libtrust" "github.com/opencontainers/runc/libcontainer" ) @@ -66,6 +74,15 @@ var ( errSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.") ) +// ErrImageDoesNotExist is error returned when no image can be found for a reference. +type ErrImageDoesNotExist struct { + RefOrID string +} + +func (e ErrImageDoesNotExist) Error() string { + return fmt.Sprintf("no such id: %s", e.RefOrID) +} + type contStore struct { s map[string]*Container sync.Mutex @@ -103,29 +120,33 @@ func (c *contStore) List() []*Container { // Daemon holds information about the Docker daemon. type Daemon struct { - ID string - repository string - sysInitPath string - containers *contStore - execCommands *exec.Store - graph *graph.Graph - repositories *graph.TagStore - idIndex *truncindex.TruncIndex - configStore *Config - containerGraphDB *graphdb.Database - driver graphdriver.Driver - execDriver execdriver.Driver - statsCollector *statsCollector - defaultLogConfig runconfig.LogConfig - RegistryService *registry.Service - EventsService *events.Events - netController libnetwork.NetworkController - volumes *store.VolumeStore - discoveryWatcher discovery.Watcher - root string - shutdown bool - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap + ID string + repository string + sysInitPath string + containers *contStore + execCommands *exec.Store + tagStore tag.Store + distributionPool *distribution.Pool + distributionMetadataStore dmetadata.Store + trustKey libtrust.PrivateKey + idIndex *truncindex.TruncIndex + configStore *Config + containerGraphDB *graphdb.Database + driver graphdriver.Driver + execDriver execdriver.Driver + 
statsCollector *statsCollector + defaultLogConfig runconfig.LogConfig + RegistryService *registry.Service + EventsService *events.Events + netController libnetwork.NetworkController + volumes *store.VolumeStore + discoveryWatcher discovery.Watcher + root string + shutdown bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + layerStore layer.Store + imageStore image.Store } // Get looks for a container using the provided information, which could be @@ -229,9 +250,7 @@ func (daemon *Daemon) Register(container *Container) error { container.unmountIpcMounts(mount.Unmount) - if err := daemon.Unmount(container); err != nil { - logrus.Debugf("unmount error %s", err) - } + daemon.Unmount(container) if err := container.toDiskLocking(); err != nil { logrus.Errorf("Error saving stopped state to disk: %v", err) } @@ -456,7 +475,7 @@ func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint *stringutils.StrSlic return cmdSlice[0], cmdSlice[1:] } -func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID string) (*Container, error) { +func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID image.ID) (*Container, error) { var ( id string err error @@ -542,7 +561,7 @@ func (daemon *Daemon) GetLabels(id string) map[string]string { return container.Config.Labels } - img, err := daemon.repositories.LookupImage(id) + img, err := daemon.GetImage(id) if err == nil { return img.ContainerConfig.Labels } @@ -702,8 +721,25 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo return nil, err } - logrus.Debug("Creating images graph") - g, err := graph.NewGraph(filepath.Join(config.Root, "graph"), d.driver, uidMaps, gidMaps) + imageRoot := filepath.Join(config.Root, "image", d.driver.String()) + fms, err := layer.NewFSMetadataStore(filepath.Join(imageRoot, "layerdb")) + if err != nil { + return nil, err + } + + d.layerStore, err = layer.NewStore(fms, d.driver) + if err != nil { + return nil, err + } + + 
distributionPool := distribution.NewPool() + + ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) + if err != nil { + return nil, err + } + + d.imageStore, err = image.NewImageStore(ifs, d.layerStore) if err != nil { return nil, err } @@ -725,23 +761,24 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo return nil, err } - eventsService := events.New() - logrus.Debug("Creating repository list") - tagCfg := &graph.TagStoreConfig{ - Graph: g, - Key: trustKey, - Registry: registryService, - Events: eventsService, - } - repositories, err := graph.NewTagStore(filepath.Join(config.Root, "repositories-"+d.driver.String()), tagCfg) + distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) if err != nil { - return nil, fmt.Errorf("Couldn't create Tag store repositories-%s: %s", d.driver.String(), err) + return nil, err } - if restorer, ok := d.driver.(graphdriver.ImageRestorer); ok { - if _, err := restorer.RestoreCustomImages(repositories, g); err != nil { - return nil, fmt.Errorf("Couldn't restore custom images: %s", err) - } + eventsService := events.New() + + tagStore, err := tag.NewTagStore(filepath.Join(imageRoot, "repositories.json")) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) + } + + if err := restoreCustomImage(d.driver, d.imageStore, d.layerStore, tagStore); err != nil { + return nil, fmt.Errorf("Couldn't restore custom images: %s", err) + } + + if err := v1.Migrate(config.Root, d.driver.String(), d.layerStore, d.imageStore, tagStore, distributionMetadataStore); err != nil { + return nil, err } // Discovery is only enabled when the daemon is launched with an address to advertise. 
When @@ -792,8 +829,10 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo d.repository = daemonRepo d.containers = &contStore{s: make(map[string]*Container)} d.execCommands = exec.NewStore() - d.graph = g - d.repositories = repositories + d.tagStore = tagStore + d.distributionPool = distributionPool + d.distributionMetadataStore = distributionMetadataStore + d.trustKey = trustKey d.idIndex = truncindex.NewTruncIndex([]string{}) d.configStore = config d.sysInitPath = sysInitPath @@ -910,28 +949,44 @@ func (daemon *Daemon) Shutdown() error { // Mount sets container.basefs // (is it not set coming in? why is it unset?) func (daemon *Daemon) Mount(container *Container) error { - dir, err := daemon.driver.Get(container.ID, container.getMountLabel()) - if err != nil { - return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err) + var layerID layer.ChainID + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return err + } + layerID = img.RootFS.ChainID() } + rwlayer, err := daemon.layerStore.Mount(container.ID, layerID, container.getMountLabel(), daemon.setupInitLayer) + if err != nil { + return err + } + dir, err := rwlayer.Path() + if err != nil { + return err + } + logrus.Debugf("container mounted via layerStore: %v", dir) if container.basefs != dir { // The mount path reported by the graph driver should always be trusted on Windows, since the // volume path for a given mounted layer may change over time. This should only be an error // on non-Windows operating systems. 
if container.basefs != "" && runtime.GOOS != "windows" { - daemon.driver.Put(container.ID) + daemon.Unmount(container) return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", daemon.driver, container.ID, container.basefs, dir) } } - container.basefs = dir + container.basefs = dir // TODO: combine these fields + container.rwlayer = rwlayer return nil } // Unmount unsets the container base filesystem -func (daemon *Daemon) Unmount(container *Container) error { - return daemon.driver.Put(container.ID) +func (daemon *Daemon) Unmount(container *Container) { + if err := daemon.layerStore.Unmount(container.ID); err != nil { + logrus.Errorf("Error unmounting container %s: %s", container.ID, err) + } } // Run uses the execution driver to run a given container @@ -962,82 +1017,46 @@ func (daemon *Daemon) unsubscribeToContainerStats(c *Container, ch chan interfac } func (daemon *Daemon) changes(container *Container) ([]archive.Change, error) { - initID := fmt.Sprintf("%s-init", container.ID) - return daemon.driver.Changes(container.ID, initID) -} - -func (daemon *Daemon) diff(container *Container) (archive.Archive, error) { - initID := fmt.Sprintf("%s-init", container.ID) - return daemon.driver.Diff(container.ID, initID) -} - -func (daemon *Daemon) createRootfs(container *Container) error { - // Step 1: create the container directory. - // This doubles as a barrier to avoid race conditions. 
- rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAs(container.root, 0700, rootUID, rootGID); err != nil { - return err - } - initID := fmt.Sprintf("%s-init", container.ID) - - if err := daemon.driver.Create(initID, container.ImageID, container.getMountLabel()); err != nil { - return err - } - initPath, err := daemon.driver.Get(initID, "") - if err != nil { - return err - } - - if err := setupInitLayer(initPath, rootUID, rootGID); err != nil { - if err := daemon.driver.Put(initID); err != nil { - logrus.Errorf("Failed to Put init layer: %v", err) - } - return err - } - - // We want to unmount init layer before we take snapshot of it - // for the actual container. - if err := daemon.driver.Put(initID); err != nil { - return err - } - - if err := daemon.driver.Create(container.ID, initID, ""); err != nil { - return err - } - return nil -} - -// Graph returns *graph.Graph which can be using for layers graph operations. -func (daemon *Daemon) Graph() *graph.Graph { - return daemon.graph + return daemon.layerStore.Changes(container.ID) } // TagImage creates a tag in the repository reponame, pointing to the image named // imageName. If force is true, an existing tag with the same name may be // overwritten. 
-func (daemon *Daemon) TagImage(repoName, tag, imageName string, force bool) error { - if err := daemon.repositories.Tag(repoName, tag, imageName, force); err != nil { +func (daemon *Daemon) TagImage(newTag reference.Named, imageName string, force bool) error { + if _, isDigested := newTag.(reference.Digested); isDigested { + return errors.New("refusing to create a tag with a digest reference") + } + if newTag.Name() == string(digest.Canonical) { + return errors.New("refusing to create an ambiguous tag using digest algorithm as name") + } + + newTag = registry.NormalizeLocalReference(newTag) + imageID, err := daemon.GetImageID(imageName) + if err != nil { return err } - daemon.EventsService.Log("tag", utils.ImageReference(repoName, tag), "") - return nil + daemon.EventsService.Log("tag", newTag.String(), "") + return daemon.tagStore.Add(newTag, imageID, force) } // PullImage initiates a pull operation. image is the repository name to pull, and // tag may be either empty, or indicate a specific tag to pull. -func (daemon *Daemon) PullImage(image string, tag string, imagePullConfig *graph.ImagePullConfig) error { - return daemon.repositories.Pull(image, tag, imagePullConfig) -} +func (daemon *Daemon) PullImage(ref reference.Named, metaHeaders map[string][]string, authConfig *cliconfig.AuthConfig, outStream io.Writer) error { + imagePullConfig := &distribution.ImagePullConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + OutStream: outStream, + RegistryService: daemon.RegistryService, + EventsService: daemon.EventsService, + MetadataStore: daemon.distributionMetadataStore, + LayerStore: daemon.layerStore, + ImageStore: daemon.imageStore, + TagStore: daemon.tagStore, + Pool: daemon.distributionPool, + } -// ImportImage imports an image, getting the archived layer data either from -// inConfig (if src is "-"), or from a URI specified in src. Progress output is -// written to outStream. 
Repository and tag names can optionally be given in -// the repo and tag arguments, respectively. -func (daemon *Daemon) ImportImage(src, repo, tag, msg string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error { - return daemon.repositories.Import(src, repo, tag, msg, inConfig, outStream, containerConfig) + return distribution.Pull(ref, imagePullConfig) } // ExportImage exports a list of images to the given output stream. The @@ -1046,47 +1065,214 @@ func (daemon *Daemon) ImportImage(src, repo, tag, msg string, inConfig io.ReadCl // the same tag are exported. names is the set of tags to export, and // outStream is the writer which the images are written to. func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { - return daemon.repositories.ImageExport(names, outStream) + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.tagStore) + return imageExporter.Save(names, outStream) } // PushImage initiates a push operation on the repository named localName. -func (daemon *Daemon) PushImage(localName string, imagePushConfig *graph.ImagePushConfig) error { - return daemon.repositories.Push(localName, imagePushConfig) +func (daemon *Daemon) PushImage(ref reference.Named, metaHeaders map[string][]string, authConfig *cliconfig.AuthConfig, outStream io.Writer) error { + imagePushConfig := &distribution.ImagePushConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + OutStream: outStream, + RegistryService: daemon.RegistryService, + EventsService: daemon.EventsService, + MetadataStore: daemon.distributionMetadataStore, + LayerStore: daemon.layerStore, + ImageStore: daemon.imageStore, + TagStore: daemon.tagStore, + TrustKey: daemon.trustKey, + } + + return distribution.Push(ref, imagePushConfig) } // LookupImage looks up an image by name and returns it as an ImageInspect // structure. 
func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { - return daemon.repositories.Lookup(name) + img, err := daemon.GetImage(name) + if err != nil { + return nil, fmt.Errorf("No such image: %s", name) + } + + refs := daemon.tagStore.References(img.ID()) + repoTags := []string{} + repoDigests := []string{} + for _, ref := range refs { + switch ref.(type) { + case reference.Tagged: + repoTags = append(repoTags, ref.String()) + case reference.Digested: + repoDigests = append(repoDigests, ref.String()) + } + } + + var size int64 + var layerMetadata map[string]string + layerID := img.RootFS.ChainID() + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + size, err = l.Size() + if err != nil { + return nil, err + } + + layerMetadata, err = l.Metadata() + if err != nil { + return nil, err + } + } + + imageInspect := &types.ImageInspect{ + ID: img.ID().String(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Parent: img.Parent.String(), + Comment: img.Comment, + Created: img.Created.Format(time.RFC3339Nano), + Container: img.Container, + ContainerConfig: &img.ContainerConfig, + DockerVersion: img.DockerVersion, + Author: img.Author, + Config: img.Config, + Architecture: img.Architecture, + Os: img.OS, + Size: size, + VirtualSize: size, // TODO: field unused, deprecate + } + + imageInspect.GraphDriver.Name = daemon.driver.String() + + imageInspect.GraphDriver.Data = layerMetadata + + return imageInspect, nil } // LoadImage uploads a set of images into the repository. This is the // complement of ImageExport. The input stream is an uncompressed tar // ball containing images and metadata. func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer) error { - return daemon.repositories.Load(inTar, outStream) -} - -// ListImages returns a filtered list of images. 
filterArgs is a JSON-encoded set -// of filter arguments which will be interpreted by pkg/parsers/filters. -// filter is a shell glob string applied to repository names. The argument -// named all controls whether all images in the graph are filtered, or just -// the heads. -func (daemon *Daemon) ListImages(filterArgs, filter string, all bool) ([]*types.Image, error) { - return daemon.repositories.Images(filterArgs, filter, all) + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.tagStore) + return imageExporter.Load(inTar, outStream) } // ImageHistory returns a slice of ImageHistory structures for the specified image // name by walking the image lineage. func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { - return daemon.repositories.History(name) + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + + history := []*types.ImageHistory{} + + layerCounter := 0 + rootFS := *img.RootFS + rootFS.DiffIDs = nil + + for _, h := range img.History { + var layerSize int64 + + if !h.EmptyLayer { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + + rootFS.Append(img.RootFS.DiffIDs[layerCounter]) + l, err := daemon.layerStore.Get(rootFS.ChainID()) + if err != nil { + return nil, err + } + layerSize, err = l.DiffSize() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + + layerCounter++ + } + + history = append([]*types.ImageHistory{{ + ID: "", + Created: h.Created.Unix(), + CreatedBy: h.CreatedBy, + Comment: h.Comment, + Size: layerSize, + }}, history...) 
+ } + + // Fill in image IDs and tags + histImg := img + id := img.ID() + for _, h := range history { + h.ID = id.String() + + var tags []string + for _, r := range daemon.tagStore.References(id) { + if _, ok := r.(reference.NamedTagged); ok { + tags = append(tags, r.String()) + } + } + + h.Tags = tags + + id = histImg.Parent + if id == "" { + break + } + histImg, err = daemon.GetImage(id.String()) + if err != nil { + break + } + } + + return history, nil } -// GetImage returns pointer to an Image struct corresponding to the given -// name. The name can include an optional tag; otherwise the default tag will -// be used. -func (daemon *Daemon) GetImage(name string) (*image.Image, error) { - return daemon.repositories.LookupImage(name) +// GetImageID returns an image ID corresponding to the image referred to by +// refOrID. +func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { + // Treat as an ID + if id, err := digest.ParseDigest(refOrID); err == nil { + return image.ID(id), nil + } + + // Treat it as a possible tag or digest reference + if ref, err := reference.ParseNamed(refOrID); err == nil { + ref = registry.NormalizeLocalReference(ref) + if id, err := daemon.tagStore.Get(ref); err == nil { + return id, nil + } + if tagged, ok := ref.(reference.Tagged); ok { + if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil { + for _, namedRef := range daemon.tagStore.References(id) { + if namedRef.Name() == ref.Name() { + return id, nil + } + } + } + } + } + + // Search based on ID + if id, err := daemon.imageStore.Search(refOrID); err == nil { + return id, nil + } + + return "", ErrImageDoesNotExist{refOrID} +} + +// GetImage returns an image corresponding to the image referred to by refOrID. 
+func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { + imgID, err := daemon.GetImageID(refOrID) + if err != nil { + return nil, err + } + return daemon.imageStore.Get(imgID) } func (daemon *Daemon) config() *Config { @@ -1132,33 +1318,23 @@ func (daemon *Daemon) GetRemappedUIDGID() (int, int) { // of the image with imgID, that had the same config when it was // created. nil is returned if a child cannot be found. An error is // returned if the parent image cannot be found. -func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) { - // for now just exit if imgID has no children. - // maybe parentRefs in graph could be used to store - // the Image obj children for faster lookup below but this can - // be quite memory hungry. - if !daemon.Graph().HasChildren(imgID) { - return nil, nil - } - +func (daemon *Daemon) ImageGetCached(imgID image.ID, config *runconfig.Config) (*image.Image, error) { // Retrieve all images - images := daemon.Graph().Map() + imgs := daemon.Map() - // Store the tree in a map of map (map[parentId][childId]) - imageMap := make(map[string]map[string]struct{}) - for _, img := range images { - if _, exists := imageMap[img.Parent]; !exists { - imageMap[img.Parent] = make(map[string]struct{}) + var siblings []image.ID + for id, img := range imgs { + if img.Parent == imgID { + siblings = append(siblings, id) } - imageMap[img.Parent][img.ID] = struct{}{} } // Loop on the children of the given image and check the config var match *image.Image - for elem := range imageMap[imgID] { - img, ok := images[elem] + for _, id := range siblings { + img, ok := imgs[id] if !ok { - return nil, fmt.Errorf("unable to find image %q", elem) + return nil, fmt.Errorf("unable to find image %q", id) } if runconfig.Compare(&img.ContainerConfig, config) { if match == nil || match.Created.Before(img.Created) { @@ -1179,6 +1355,12 @@ func tempDir(rootDir string, rootUID, rootGID int) (string, error) { } func (daemon 
*Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error { + container.Lock() + if err := parseSecurityOpt(container, hostConfig); err != nil { + container.Unlock() + return err + } + container.Unlock() // Do not lock while creating volumes since this could be calling out to external plugins // Don't want to block other actions, like `docker ps` because we're waiting on an external plugin @@ -1199,6 +1381,11 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig. return nil } +func (daemon *Daemon) setupInitLayer(initPath string) error { + rootUID, rootGID := daemon.GetRemappedUIDGID() + return setupInitLayer(initPath, rootUID, rootGID) +} + func setDefaultMtu(config *Config) { // do nothing if the config does not have the default 0 value. if config.Mtu != 0 { diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go index 15b6bc6922..520ccf087c 100644 --- a/daemon/daemon_unix.go +++ b/daemon/daemon_unix.go @@ -14,12 +14,15 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" derr "github.com/docker/docker/errors" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" pblkiodev "github.com/docker/docker/pkg/blkiodev" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/runconfig" + "github.com/docker/docker/tag" "github.com/docker/libnetwork" nwconfig "github.com/docker/libnetwork/config" "github.com/docker/libnetwork/drivers/bridge" @@ -601,9 +604,7 @@ func (daemon *Daemon) conditionalMountOnStart(container *Container) error { // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. 
func (daemon *Daemon) conditionalUnmountOnCleanup(container *Container) { - if err := daemon.Unmount(container); err != nil { - logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err) - } + daemon.Unmount(container) } // getDefaultRouteMtu returns the MTU for the default route's interface. @@ -624,3 +625,8 @@ func getDefaultRouteMtu() (int, error) { } return 0, errNoDefaultRoute } + +func restoreCustomImage(driver graphdriver.Driver, is image.Store, ls layer.Store, ts tag.Store) error { + // Unix has no custom images to register + return nil +} diff --git a/daemon/daemon_windows.go b/daemon/daemon_windows.go index 219a5c3786..53dad672dc 100644 --- a/daemon/daemon_windows.go +++ b/daemon/daemon_windows.go @@ -1,12 +1,22 @@ package daemon import ( + "encoding/json" + "errors" "fmt" + "path/filepath" + "runtime" + "strings" "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/tag" // register the windows graph driver - _ "github.com/docker/docker/daemon/graphdriver/windows" + "github.com/docker/docker/daemon/graphdriver/windows" "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" "github.com/docker/libnetwork" @@ -128,8 +138,71 @@ func (daemon *Daemon) conditionalMountOnStart(container *Container) error { func (daemon *Daemon) conditionalUnmountOnCleanup(container *Container) { // We do not unmount if a Hyper-V container if !container.hostConfig.Isolation.IsHyperV() { - if err := daemon.Unmount(container); err != nil { - logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err) - } + daemon.Unmount(container) } } + +func restoreCustomImage(driver graphdriver.Driver, is image.Store, ls layer.Store, ts tag.Store) error { + if wd, ok := driver.(*windows.Driver); ok { + imageInfos, err := wd.GetCustomImageInfos() + 
if err != nil { + return err + } + + // Convert imageData to valid image configuration + for i := range imageInfos { + name := strings.ToLower(imageInfos[i].Name) + + type registrar interface { + RegisterDiffID(graphID string, size int64) (layer.Layer, error) + } + r, ok := ls.(registrar) + if !ok { + return errors.New("Layerstore doesn't support RegisterDiffID") + } + if _, err := r.RegisterDiffID(imageInfos[i].ID, imageInfos[i].Size); err != nil { + return err + } + // layer is intentionally not released + + rootFS := image.NewRootFS() + rootFS.BaseLayer = filepath.Base(imageInfos[i].Path) + + // Create history for base layer + config, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Created: imageInfos[i].CreatedTime, + }, + RootFS: rootFS, + History: []image.History{}, + }) + + named, err := reference.ParseNamed(name) + if err != nil { + return err + } + + ref, err := reference.WithTag(named, imageInfos[i].Version) + if err != nil { + return err + } + + id, err := is.Create(config) + if err != nil { + return err + } + + if err := ts.Add(ref, id, true); err != nil { + return err + } + + logrus.Debugf("Registered base layer %s as %s", ref, id) + } + + } + + return nil +} diff --git a/daemon/daemonbuilder/builder.go b/daemon/daemonbuilder/builder.go index 0b5528f16f..4a94d9489e 100644 --- a/daemon/daemonbuilder/builder.go +++ b/daemon/daemonbuilder/builder.go @@ -9,17 +9,16 @@ import ( "strings" "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" "github.com/docker/docker/api" "github.com/docker/docker/builder" "github.com/docker/docker/cliconfig" "github.com/docker/docker/daemon" - "github.com/docker/docker/graph" "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" - 
"github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/registry" @@ -44,15 +43,24 @@ func (d Docker) LookupImage(name string) (*image.Image, error) { // Pull tells Docker to pull image referenced by `name`. func (d Docker) Pull(name string) (*image.Image, error) { - remote, tag := parsers.ParseRepositoryTag(name) - if tag == "" { - tag = "latest" + ref, err := reference.ParseNamed(name) + if err != nil { + return nil, err + } + switch ref.(type) { + case reference.Tagged: + case reference.Digested: + default: + ref, err = reference.WithTag(ref, "latest") + if err != nil { + return nil, err + } } pullRegistryAuth := &cliconfig.AuthConfig{} if len(d.AuthConfigs) > 0 { // The request came with a full auth config file, we prefer to use that - repoInfo, err := d.Daemon.RegistryService.ResolveRepository(remote) + repoInfo, err := d.Daemon.RegistryService.ResolveRepository(ref) if err != nil { return nil, err } @@ -64,12 +72,7 @@ func (d Docker) Pull(name string) (*image.Image, error) { pullRegistryAuth = &resolvedConfig } - imagePullConfig := &graph.ImagePullConfig{ - AuthConfig: pullRegistryAuth, - OutStream: ioutils.NopWriteCloser(d.OutOld), - } - - if err := d.Daemon.PullImage(remote, tag, imagePullConfig); err != nil { + if err := d.Daemon.PullImage(ref, nil, pullRegistryAuth, ioutils.NopWriteCloser(d.OutOld)); err != nil { return nil, err } @@ -106,18 +109,20 @@ func (d Docker) Remove(id string, cfg *daemon.ContainerRmConfig) error { } // Commit creates a new Docker image from an existing Docker container. -func (d Docker) Commit(name string, cfg *daemon.ContainerCommitConfig) (*image.Image, error) { +func (d Docker) Commit(name string, cfg *daemon.ContainerCommitConfig) (string, error) { return d.Daemon.Commit(name, cfg) } // Retain retains an image avoiding it to be removed or overwritten until a corresponding Release() call. 
func (d Docker) Retain(sessionID, imgID string) { - d.Daemon.Graph().Retain(sessionID, imgID) + // FIXME: This will be solved with tags in client-side builder + //d.Daemon.Graph().Retain(sessionID, imgID) } // Release releases a list of images that were retained for the time of a build. func (d Docker) Release(sessionID string, activeImages []string) { - d.Daemon.Graph().Release(sessionID, activeImages...) + // FIXME: This will be solved with tags in client-side builder + //d.Daemon.Graph().Release(sessionID, activeImages...) } // Copy copies/extracts a source FileInfo to a destination path inside a container @@ -199,11 +204,11 @@ func (d Docker) Copy(c *daemon.Container, destPath string, src builder.FileInfo, // GetCachedImage returns a reference to a cached image whose parent equals `parent` // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. func (d Docker) GetCachedImage(imgID string, cfg *runconfig.Config) (string, error) { - cache, err := d.Daemon.ImageGetCached(imgID, cfg) + cache, err := d.Daemon.ImageGetCached(image.ID(imgID), cfg) if cache == nil || err != nil { return "", err } - return cache.ID, nil + return cache.ID().String(), nil } // Kill stops the container execution abruptly. @@ -218,7 +223,8 @@ func (d Docker) Mount(c *daemon.Container) error { // Unmount unmounts the root filesystem for the container. 
func (d Docker) Unmount(c *daemon.Container) error { - return d.Daemon.Unmount(c) + d.Daemon.Unmount(c) + return nil } // Start starts a container diff --git a/daemon/delete.go b/daemon/delete.go index 018513cfad..c06a319c6b 100644 --- a/daemon/delete.go +++ b/daemon/delete.go @@ -1,12 +1,12 @@ package daemon import ( - "fmt" "os" "path" "github.com/Sirupsen/logrus" derr "github.com/docker/docker/errors" + "github.com/docker/docker/layer" volumestore "github.com/docker/docker/volume/store" ) @@ -119,15 +119,12 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) { logrus.Debugf("Unable to remove container from link graph: %s", err) } - if err = daemon.driver.Remove(container.ID); err != nil { + metadata, err := daemon.layerStore.DeleteMount(container.ID) + layer.LogReleaseMetadata(metadata) + if err != nil { return derr.ErrorCodeRmDriverFS.WithArgs(daemon.driver, container.ID, err) } - initID := fmt.Sprintf("%s-init", container.ID) - if err := daemon.driver.Remove(initID); err != nil { - return derr.ErrorCodeRmInit.WithArgs(daemon.driver, initID, err) - } - if err = os.RemoveAll(container.root); err != nil { return derr.ErrorCodeRmFS.WithArgs(container.ID, err) } diff --git a/daemon/errors.go b/daemon/errors.go index 45a4882085..8ab411c0f0 100644 --- a/daemon/errors.go +++ b/daemon/errors.go @@ -3,21 +3,25 @@ package daemon import ( "strings" + "github.com/docker/distribution/reference" derr "github.com/docker/docker/errors" - "github.com/docker/docker/graph/tags" - "github.com/docker/docker/pkg/parsers" + tagpkg "github.com/docker/docker/tag" ) -func (d *Daemon) graphNotExistToErrcode(imageName string, err error) error { - if d.Graph().IsNotExist(err, imageName) { - if strings.Contains(imageName, "@") { - return derr.ErrorCodeNoSuchImageHash.WithArgs(imageName) +func (d *Daemon) imageNotExistToErrcode(err error) error { + if dne, isDNE := err.(ErrImageDoesNotExist); isDNE { + if strings.Contains(dne.RefOrID, "@") { + return 
derr.ErrorCodeNoSuchImageHash.WithArgs(dne.RefOrID) } - img, tag := parsers.ParseRepositoryTag(imageName) - if tag == "" { - tag = tags.DefaultTag + tag := tagpkg.DefaultTag + ref, err := reference.ParseNamed(dne.RefOrID) + if err != nil { + return derr.ErrorCodeNoSuchImageTag.WithArgs(dne.RefOrID, tag) } - return derr.ErrorCodeNoSuchImageTag.WithArgs(img, tag) + if tagged, isTagged := ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + return derr.ErrorCodeNoSuchImageTag.WithArgs(ref.Name(), tag) } return err } diff --git a/daemon/events/filter.go b/daemon/events/filter.go index 49bbbe8a05..ae7fba93d1 100644 --- a/daemon/events/filter.go +++ b/daemon/events/filter.go @@ -1,8 +1,8 @@ package events import ( + "github.com/docker/distribution/reference" "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/filters" ) @@ -38,8 +38,11 @@ func (ef *Filter) isLabelFieldIncluded(id string) bool { // against the stripped repo name without any tags. func (ef *Filter) isImageIncluded(eventID string, eventFrom string) bool { stripTag := func(image string) string { - repo, _ := parsers.ParseRepositoryTag(image) - return repo + ref, err := reference.ParseNamed(image) + if err != nil { + return image + } + return ref.Name() } return isFieldIncluded(eventID, ef.filter["image"]) || diff --git a/daemon/graphdriver/fsdiff.go b/daemon/graphdriver/fsdiff.go index ea0b053aa3..5a3493250b 100644 --- a/daemon/graphdriver/fsdiff.go +++ b/daemon/graphdriver/fsdiff.go @@ -1,5 +1,3 @@ -// +build daemon - package graphdriver import ( @@ -13,6 +11,12 @@ import ( "github.com/docker/docker/pkg/ioutils" ) +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + // NaiveDiffDriver takes a ProtoDriver and adds the // capability of the Diffing methods which it may or may not // support on its own. 
See the comment on the exported @@ -129,7 +133,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (s GIDMaps: gdw.gidMaps} start := time.Now().UTC() logrus.Debugf("Start untar layer") - if size, err = chrootarchive.ApplyUncompressedLayer(layerFs, diff, options); err != nil { + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { return } logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) diff --git a/daemon/graphdriver/imagerestorer.go b/daemon/graphdriver/imagerestorer.go deleted file mode 100644 index d6592203b5..0000000000 --- a/daemon/graphdriver/imagerestorer.go +++ /dev/null @@ -1,31 +0,0 @@ -package graphdriver - -import ( - "io" - - "github.com/docker/docker/image" -) - -// NOTE: These interfaces are used for implementing specific features of the Windows -// graphdriver implementation. The current versions are a short-term solution and -// likely to change or possibly be eliminated, so avoid using them outside of the Windows -// graphdriver code. - -// ImageRestorer interface allows the implementer to add a custom image to -// the graph and tagstore. -type ImageRestorer interface { - RestoreCustomImages(tagger Tagger, recorder Recorder) ([]string, error) -} - -// Tagger is an interface that exposes the TagStore.Tag function without needing -// to import graph. -type Tagger interface { - Tag(repoName, tag, imageName string, force bool) error -} - -// Recorder is an interface that exposes the Graph.Register and Graph.Exists -// functions without needing to import graph. 
-type Recorder interface { - Exists(id string) bool - Register(img image.Descriptor, layerData io.Reader) error -} diff --git a/daemon/graphdriver/plugin.go b/daemon/graphdriver/plugin.go index e816cd0d84..d63161b074 100644 --- a/daemon/graphdriver/plugin.go +++ b/daemon/graphdriver/plugin.go @@ -1,5 +1,4 @@ // +build experimental -// +build daemon package graphdriver diff --git a/daemon/graphdriver/proxy.go b/daemon/graphdriver/proxy.go index 35a06da415..47128473b0 100644 --- a/daemon/graphdriver/proxy.go +++ b/daemon/graphdriver/proxy.go @@ -1,5 +1,4 @@ // +build experimental -// +build daemon package graphdriver diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go index d2b807abc1..ef90f19142 100644 --- a/daemon/graphdriver/vfs/driver.go +++ b/daemon/graphdriver/vfs/driver.go @@ -1,5 +1,3 @@ -// +build daemon - package vfs import ( @@ -14,6 +12,11 @@ import ( "github.com/opencontainers/runc/libcontainer/label" ) +var ( + // CopyWithTar defines the copy method to use. 
+ CopyWithTar = chrootarchive.CopyWithTar +) + func init() { graphdriver.Register("vfs", Init) } @@ -89,7 +92,7 @@ func (d *Driver) Create(id, parent, mountLabel string) error { if err != nil { return fmt.Errorf("%s: %s", parent, err) } - if err := chrootarchive.CopyWithTar(parentDir, dir); err != nil { + if err := CopyWithTar(parentDir, dir); err != nil { return err } return nil diff --git a/daemon/graphdriver/vfs/driver_unsupported.go b/daemon/graphdriver/vfs/driver_unsupported.go deleted file mode 100644 index 474a4c5c18..0000000000 --- a/daemon/graphdriver/vfs/driver_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !daemon - -package vfs diff --git a/daemon/graphdriver/windows/windows.go b/daemon/graphdriver/windows/windows.go index 007c6f907f..0594225268 100644 --- a/daemon/graphdriver/windows/windows.go +++ b/daemon/graphdriver/windows/windows.go @@ -6,10 +6,10 @@ import ( "crypto/sha512" "encoding/json" "fmt" + "io" "io/ioutil" "os" "path/filepath" - "runtime" "strconv" "strings" "sync" @@ -17,8 +17,6 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/idtools" @@ -40,26 +38,6 @@ const ( filterDriver ) -// CustomImageDescriptor is an image descriptor for use by RestoreCustomImages -type customImageDescriptor struct { - img *image.Image -} - -// ID returns the image ID specified in the image structure. -func (img customImageDescriptor) ID() string { - return img.img.ID -} - -// Parent returns the parent ID - in this case, none -func (img customImageDescriptor) Parent() string { - return "" -} - -// MarshalConfig renders the image structure into JSON. -func (img customImageDescriptor) MarshalConfig() ([]byte, error) { - return json.Marshal(img.img) -} - // Driver represents a windows graph driver. 
type Driver struct { // info stores the shim driver information @@ -195,7 +173,7 @@ func (d *Driver) Remove(id string) error { if err != nil { return err } - + os.RemoveAll(filepath.Join(d.info.HomeDir, "sysfile-backups", rID)) // ok to fail return hcsshim.DestroyLayer(d.info, rID) } @@ -402,22 +380,27 @@ func (d *Driver) DiffSize(id, parent string) (size int64, err error) { return archive.ChangesSize(layerFs, changes), nil } -// RestoreCustomImages adds any auto-detected OS specific images to the tag and graph store. -func (d *Driver) RestoreCustomImages(tagger graphdriver.Tagger, recorder graphdriver.Recorder) (imageIDs []string, err error) { +// CustomImageInfo is the object returned by the driver describing the base +// image. +type CustomImageInfo struct { + ID string + Name string + Version string + Path string + Size int64 + CreatedTime time.Time +} + +// GetCustomImageInfos returns the image infos for window specific +// base images which should always be present. +func (d *Driver) GetCustomImageInfos() ([]CustomImageInfo, error) { strData, err := hcsshim.GetSharedBaseImages() if err != nil { return nil, fmt.Errorf("Failed to restore base images: %s", err) } - type customImageInfo struct { - Name string - Version string - Path string - Size int64 - CreatedTime time.Time - } type customImageInfoList struct { - Images []customImageInfo + Images []CustomImageInfo } var infoData customImageInfoList @@ -428,43 +411,28 @@ func (d *Driver) RestoreCustomImages(tagger graphdriver.Tagger, recorder graphdr return nil, err } + var images []CustomImageInfo + for _, imageData := range infoData.Images { - _, folderName := filepath.Split(imageData.Path) + folderName := filepath.Base(imageData.Path) // Use crypto hash of the foldername to generate a docker style id. h := sha512.Sum384([]byte(folderName)) id := fmt.Sprintf("%x", h[:32]) - if !recorder.Exists(id) { - // Register the image. 
- img := &image.Image{ - ID: id, - Created: imageData.CreatedTime, - DockerVersion: dockerversion.Version, - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - Size: imageData.Size, - } - - if err := recorder.Register(customImageDescriptor{img}, nil); err != nil { - return nil, err - } - - // Create tags for the new image. - if err := tagger.Tag(strings.ToLower(imageData.Name), imageData.Version, img.ID, true); err != nil { - return nil, err - } - - // Create the alternate ID file. - if err := d.setID(img.ID, folderName); err != nil { - return nil, err - } - - imageIDs = append(imageIDs, img.ID) + if err := d.Create(id, "", ""); err != nil { + return nil, err } + // Create the alternate ID file. + if err := d.setID(id, folderName); err != nil { + return nil, err + } + + imageData.ID = id + images = append(images, imageData) } - return imageIDs, nil + return images, nil } // GetMetadata returns custom driver information. @@ -533,6 +501,10 @@ func (d *Driver) importLayer(id string, layerData archive.Reader, parentLayerPat if size, err = chrootarchive.ApplyLayer(tempFolder, layerData); err != nil { return } + err = copySysFiles(tempFolder, filepath.Join(d.info.HomeDir, "sysfile-backups", id)) + if err != nil { + return + } logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) if err = hcsshim.ImportLayer(d.info, id, tempFolder, parentLayerPaths); err != nil { @@ -596,3 +568,103 @@ func (d *Driver) setLayerChain(id string, chain []string) error { return nil } + +// DiffPath returns a directory that contains files needed to construct layer diff. +func (d *Driver) DiffPath(id string) (path string, release func() error, err error) { + id, err = d.resolveID(id) + if err != nil { + return + } + + // Getting the layer paths must be done outside of the lock. 
+ layerChain, err := d.getLayerChain(id) + if err != nil { + return + } + + layerFolder := d.dir(id) + tempFolder := layerFolder + "-" + strconv.FormatUint(uint64(random.Rand.Uint32()), 10) + if err = os.MkdirAll(tempFolder, 0755); err != nil { + logrus.Errorf("Could not create %s %s", tempFolder, err) + return + } + + defer func() { + if err != nil { + _, folderName := filepath.Split(tempFolder) + if err2 := hcsshim.DestroyLayer(d.info, folderName); err2 != nil { + logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2) + } + } + }() + + if err = hcsshim.ExportLayer(d.info, id, tempFolder, layerChain); err != nil { + return + } + + err = copySysFiles(filepath.Join(d.info.HomeDir, "sysfile-backups", id), tempFolder) + if err != nil { + return + } + + return tempFolder, func() error { + // TODO: activate layers and release here? + _, folderName := filepath.Split(tempFolder) + return hcsshim.DestroyLayer(d.info, folderName) + }, nil +} + +var sysFileWhiteList = []string{ + "Hives\\*", + "Files\\BOOTNXT", + "tombstones.txt", +} + +// note this only handles files +func copySysFiles(src string, dest string) error { + if err := os.MkdirAll(dest, 0700); err != nil { + return err + } + return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + rel, err := filepath.Rel(src, path) + if err != nil { + return err + } + for _, sysfile := range sysFileWhiteList { + if matches, err := filepath.Match(sysfile, rel); err != nil || !matches { + continue + } + + fi, err := os.Lstat(path) + if err != nil { + return err + } + + if !fi.Mode().IsRegular() { + continue + } + + targetPath := filepath.Join(dest, rel) + if err = os.MkdirAll(filepath.Dir(targetPath), 0700); err != nil { + return err + } + + in, err := os.Open(path) + if err != nil { + return err + } + out, err := os.Create(targetPath) + if err != nil { + in.Close() + return err + } + _, err = io.Copy(out, in) + in.Close() + out.Close() + if err != nil { + return err + } + } + return nil + 
}) +} diff --git a/daemon/image_delete.go b/daemon/image_delete.go index ca8b82a1f3..3847be1ca3 100644 --- a/daemon/image_delete.go +++ b/daemon/image_delete.go @@ -4,13 +4,12 @@ import ( "fmt" "strings" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" derr "github.com/docker/docker/errors" - "github.com/docker/docker/graph/tags" "github.com/docker/docker/image" - "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/utils" + tagpkg "github.com/docker/docker/tag" ) // ImageDelete deletes the image referenced by the given imageRef from this @@ -53,39 +52,46 @@ import ( func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { records := []types.ImageDelete{} - img, err := daemon.repositories.LookupImage(imageRef) + imgID, err := daemon.GetImageID(imageRef) if err != nil { - return nil, daemon.graphNotExistToErrcode(imageRef, err) + return nil, daemon.imageNotExistToErrcode(err) } + repoRefs := daemon.tagStore.References(imgID) + var removedRepositoryRef bool - if !isImageIDPrefix(img.ID, imageRef) { + if !isImageIDPrefix(imgID.String(), imageRef) { // A repository reference was given and should be removed // first. We can only remove this reference if either force is // true, there are multiple repository references to this // image, or there are no containers using the given reference. - if !(force || daemon.imageHasMultipleRepositoryReferences(img.ID)) { - if container := daemon.getContainerUsingImage(img.ID); container != nil { + if !(force || len(repoRefs) > 1) { + if container := daemon.getContainerUsingImage(imgID); container != nil { // If we removed the repository reference then // this image would remain "dangling" and since // we really want to avoid that the client must // explicitly force its removal. 
- return nil, derr.ErrorCodeImgDelUsed.WithArgs(imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(img.ID)) + return nil, derr.ErrorCodeImgDelUsed.WithArgs(imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) } } - parsedRef, err := daemon.removeImageRef(imageRef) + parsedRef, err := reference.ParseNamed(imageRef) if err != nil { return nil, err } - untaggedRecord := types.ImageDelete{Untagged: parsedRef} + parsedRef, err = daemon.removeImageRef(parsedRef) + if err != nil { + return nil, err + } - daemon.EventsService.Log("untag", img.ID, "") + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.EventsService.Log("untag", imgID.String(), "") records = append(records, untaggedRecord) // If has remaining references then untag finishes the remove - if daemon.repositories.HasReferences(img) { + if len(repoRefs) > 1 { return records, nil } @@ -95,38 +101,39 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I // repository reference to the image then we will want to // remove that reference. // FIXME: Is this the behavior we want? - repoRefs := daemon.repositories.ByID()[img.ID] if len(repoRefs) == 1 { parsedRef, err := daemon.removeImageRef(repoRefs[0]) if err != nil { return nil, err } - untaggedRecord := types.ImageDelete{Untagged: parsedRef} + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} - daemon.EventsService.Log("untag", img.ID, "") + daemon.EventsService.Log("untag", imgID.String(), "") records = append(records, untaggedRecord) } } - return records, daemon.imageDeleteHelper(img, &records, force, prune, removedRepositoryRef) + return records, daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef) } // isImageIDPrefix returns whether the given possiblePrefix is a prefix of the // given imageID. 
func isImageIDPrefix(imageID, possiblePrefix string) bool { - return strings.HasPrefix(imageID, possiblePrefix) -} + if strings.HasPrefix(imageID, possiblePrefix) { + return true + } -// imageHasMultipleRepositoryReferences returns whether there are multiple -// repository references to the given imageID. -func (daemon *Daemon) imageHasMultipleRepositoryReferences(imageID string) bool { - return len(daemon.repositories.ByID()[imageID]) > 1 + if i := strings.IndexRune(imageID, ':'); i >= 0 { + return strings.HasPrefix(imageID[i+1:], possiblePrefix) + } + + return false } // getContainerUsingImage returns a container that was created using the given // imageID. Returns nil if there is no such container. -func (daemon *Daemon) getContainerUsingImage(imageID string) *Container { +func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *Container { for _, container := range daemon.List() { if container.ImageID == imageID { return container @@ -141,18 +148,24 @@ func (daemon *Daemon) getContainerUsingImage(imageID string) *Container { // repositoryRef must not be an image ID but a repository name followed by an // optional tag or digest reference. If tag or digest is omitted, the default // tag is used. Returns the resolved image reference and an error. -func (daemon *Daemon) removeImageRef(repositoryRef string) (string, error) { - repository, ref := parsers.ParseRepositoryTag(repositoryRef) - if ref == "" { - ref = tags.DefaultTag +func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) { + switch ref.(type) { + case reference.Tagged: + case reference.Digested: + default: + var err error + ref, err = reference.WithTag(ref, tagpkg.DefaultTag) + if err != nil { + return nil, err + } } // Ignore the boolean value returned, as far as we're concerned, this // is an idempotent operation and it's okay if the reference didn't // exist in the first place. 
- _, err := daemon.repositories.Delete(repository, ref) + _, err := daemon.tagStore.Delete(ref) - return utils.ImageReference(repository, ref), err + return ref, err } // removeAllReferencesToImageID attempts to remove every reference to the given @@ -160,8 +173,8 @@ func (daemon *Daemon) removeImageRef(repositoryRef string) (string, error) { // on the first encountered error. Removed references are logged to this // daemon's event service. An "Untagged" types.ImageDelete is added to the // given list of records. -func (daemon *Daemon) removeAllReferencesToImageID(imgID string, records *[]types.ImageDelete) error { - imageRefs := daemon.repositories.ByID()[imgID] +func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDelete) error { + imageRefs := daemon.tagStore.References(imgID) for _, imageRef := range imageRefs { parsedRef, err := daemon.removeImageRef(imageRef) @@ -169,9 +182,9 @@ func (daemon *Daemon) removeAllReferencesToImageID(imgID string, records *[]type return err } - untaggedRecord := types.ImageDelete{Untagged: parsedRef} + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} - daemon.EventsService.Log("untag", imgID, "") + daemon.EventsService.Log("untag", imgID.String(), "") *records = append(*records, untaggedRecord) } @@ -182,7 +195,7 @@ func (daemon *Daemon) removeAllReferencesToImageID(imgID string, records *[]type // Implements the error interface. type imageDeleteConflict struct { hard bool - imgID string + imgID image.ID message string } @@ -194,7 +207,7 @@ func (idc *imageDeleteConflict) Error() string { forceMsg = "must be forced" } - return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID), forceMsg, idc.message) + return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) } // imageDeleteHelper attempts to delete the given image from this daemon. 
If @@ -208,11 +221,11 @@ func (idc *imageDeleteConflict) Error() string { // conflict is encountered, it will be returned immediately without deleting // the image. If quiet is true, any encountered conflicts will be ignored and // the function will return nil immediately without deleting the image. -func (daemon *Daemon) imageDeleteHelper(img *image.Image, records *[]types.ImageDelete, force, prune, quiet bool) error { +func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDelete, force, prune, quiet bool) error { // First, determine if this image has any conflicts. Ignore soft conflicts // if force is true. - if conflict := daemon.checkImageDeleteConflict(img, force); conflict != nil { - if quiet && !daemon.imageIsDangling(img) { + if conflict := daemon.checkImageDeleteConflict(imgID, force); conflict != nil { + if quiet && !daemon.imageIsDangling(imgID) { // Ignore conflicts UNLESS the image is "dangling" in // which case we want the user to know. return nil @@ -223,33 +236,38 @@ func (daemon *Daemon) imageDeleteHelper(img *image.Image, records *[]types.Image return conflict } + parent, err := daemon.imageStore.GetParent(imgID) + if err != nil { + // There may be no parent + parent = "" + } + // Delete all repository tag/digest references to this image. 
- if err := daemon.removeAllReferencesToImageID(img.ID, records); err != nil { + if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil { return err } - if err := daemon.Graph().Delete(img.ID); err != nil { + removedLayers, err := daemon.imageStore.Delete(imgID) + if err != nil { return err } - daemon.EventsService.Log("delete", img.ID, "") - *records = append(*records, types.ImageDelete{Deleted: img.ID}) + daemon.EventsService.Log("delete", imgID.String(), "") + *records = append(*records, types.ImageDelete{Deleted: imgID.String()}) + for _, removedLayer := range removedLayers { + *records = append(*records, types.ImageDelete{Deleted: removedLayer.ChainID.String()}) + } - if !prune || img.Parent == "" { + if !prune || parent == "" { return nil } // We need to prune the parent image. This means delete it if there are // no tags/digests referencing it and there are no containers using it ( // either running or stopped). - parentImg, err := daemon.Graph().Get(img.Parent) - if err != nil { - return derr.ErrorCodeImgNoParent.WithArgs(err) - } - // Do not force prunings, but do so quietly (stopping on any encountered // conflicts). - return daemon.imageDeleteHelper(parentImg, records, false, true, true) + return daemon.imageDeleteHelper(parent, records, false, true, true) } // checkImageDeleteConflict determines whether there are any conflicts @@ -258,9 +276,9 @@ func (daemon *Daemon) imageDeleteHelper(img *image.Image, records *[]types.Image // using the image. A soft conflict is any tags/digest referencing the given // image or any stopped container using the image. If ignoreSoftConflicts is // true, this function will not check for soft conflict conditions. -func (daemon *Daemon) checkImageDeleteConflict(img *image.Image, ignoreSoftConflicts bool) *imageDeleteConflict { +func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, ignoreSoftConflicts bool) *imageDeleteConflict { // Check for hard conflicts first. 
- if conflict := daemon.checkImageDeleteHardConflict(img); conflict != nil { + if conflict := daemon.checkImageDeleteHardConflict(imgID); conflict != nil { return conflict } @@ -270,24 +288,15 @@ func (daemon *Daemon) checkImageDeleteConflict(img *image.Image, ignoreSoftConfl return nil } - return daemon.checkImageDeleteSoftConflict(img) + return daemon.checkImageDeleteSoftConflict(imgID) } -func (daemon *Daemon) checkImageDeleteHardConflict(img *image.Image) *imageDeleteConflict { - // Check if the image ID is being used by a pull or build. - if daemon.Graph().IsHeld(img.ID) { - return &imageDeleteConflict{ - hard: true, - imgID: img.ID, - message: "image is held by an ongoing pull or build", - } - } - +func (daemon *Daemon) checkImageDeleteHardConflict(imgID image.ID) *imageDeleteConflict { // Check if the image has any descendent images. - if daemon.Graph().HasChildren(img.ID) { + if len(daemon.imageStore.Children(imgID)) > 0 { return &imageDeleteConflict{ hard: true, - imgID: img.ID, + imgID: imgID, message: "image has dependent child images", } } @@ -299,9 +308,9 @@ func (daemon *Daemon) checkImageDeleteHardConflict(img *image.Image) *imageDelet continue } - if container.ImageID == img.ID { + if container.ImageID == imgID { return &imageDeleteConflict{ - imgID: img.ID, + imgID: imgID, hard: true, message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), } @@ -311,11 +320,11 @@ func (daemon *Daemon) checkImageDeleteHardConflict(img *image.Image) *imageDelet return nil } -func (daemon *Daemon) checkImageDeleteSoftConflict(img *image.Image) *imageDeleteConflict { +func (daemon *Daemon) checkImageDeleteSoftConflict(imgID image.ID) *imageDeleteConflict { // Check if any repository tags/digest reference this image. 
- if daemon.repositories.HasReferences(img) { + if len(daemon.tagStore.References(imgID)) > 0 { return &imageDeleteConflict{ - imgID: img.ID, + imgID: imgID, message: "image is referenced in one or more repositories", } } @@ -327,9 +336,9 @@ func (daemon *Daemon) checkImageDeleteSoftConflict(img *image.Image) *imageDelet continue } - if container.ImageID == img.ID { + if container.ImageID == imgID { return &imageDeleteConflict{ - imgID: img.ID, + imgID: imgID, message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), } } @@ -341,6 +350,6 @@ func (daemon *Daemon) checkImageDeleteSoftConflict(img *image.Image) *imageDelet // imageIsDangling returns whether the given image is "dangling" which means // that there are no repository references to the given image and it has no // child images. -func (daemon *Daemon) imageIsDangling(img *image.Image) bool { - return !(daemon.repositories.HasReferences(img) || daemon.Graph().HasChildren(img.ID)) +func (daemon *Daemon) imageIsDangling(imgID image.ID) bool { + return !(len(daemon.tagStore.References(imgID)) > 0 || len(daemon.imageStore.Children(imgID)) > 0) } diff --git a/daemon/images.go b/daemon/images.go new file mode 100644 index 0000000000..b4c506ee05 --- /dev/null +++ b/daemon/images.go @@ -0,0 +1,163 @@ +package daemon + +import ( + "fmt" + "path" + "sort" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/parsers/filters" +) + +var acceptedImageFilterTags = map[string]struct{}{ + "dangling": {}, + "label": {}, +} + +// byCreated is a temporary type used to sort a list of images by creation +// time. 
+type byCreated []*types.Image + +func (r byCreated) Len() int { return len(r) } +func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } + +// Map returns a map of all images in the ImageStore +func (daemon *Daemon) Map() map[image.ID]*image.Image { + return daemon.imageStore.Map() +} + +// Images returns a filtered list of images. filterArgs is a JSON-encoded set +// of filter arguments which will be interpreted by pkg/parsers/filters. +// filter is a shell glob string applied to repository names. The argument +// named all controls whether all images in the graph are filtered, or just +// the heads. +func (daemon *Daemon) Images(filterArgs, filter string, all bool) ([]*types.Image, error) { + var ( + allImages map[image.ID]*image.Image + err error + danglingOnly = false + ) + + imageFilters, err := filters.FromParam(filterArgs) + if err != nil { + return nil, err + } + for name := range imageFilters { + if _, ok := acceptedImageFilterTags[name]; !ok { + return nil, fmt.Errorf("Invalid filter '%s'", name) + } + } + + if i, ok := imageFilters["dangling"]; ok { + for _, value := range i { + if v := strings.ToLower(value); v == "true" { + danglingOnly = true + } else if v != "false" { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", v) + } + } + } + + if danglingOnly { + allImages = daemon.imageStore.Heads() + } else { + allImages = daemon.imageStore.Map() + } + + images := []*types.Image{} + + var filterTagged bool + if filter != "" { + filterRef, err := reference.Parse(filter) + if err == nil { // parse error means wildcard repo + if _, ok := filterRef.(reference.Tagged); ok { + filterTagged = true + } + } + } + + for id, img := range allImages { + if _, ok := imageFilters["label"]; ok { + if img.Config == nil { + // Very old image that do not have image.Config (or even labels) + continue + } + // We are now sure image.Config is not nil + if 
!imageFilters.MatchKVList("label", img.Config.Labels) { + continue + } + } + + layerID := img.RootFS.ChainID() + var size int64 + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + + size, err = l.Size() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + } + + newImage := newImage(img, size) + + for _, ref := range daemon.tagStore.References(id) { + if filter != "" { // filter by tag/repo name + if filterTagged { // filter by tag, require full ref match + if ref.String() != filter { + continue + } + } else if matched, err := path.Match(filter, ref.Name()); !matched || err != nil { // name only match, FIXME: docs say exact + continue + } + } + if _, ok := ref.(reference.Digested); ok { + newImage.RepoDigests = append(newImage.RepoDigests, ref.String()) + } + if _, ok := ref.(reference.Tagged); ok { + newImage.RepoTags = append(newImage.RepoTags, ref.String()) + } + } + if newImage.RepoDigests == nil && newImage.RepoTags == nil { + if all || len(daemon.imageStore.Children(id)) == 0 { + if filter != "" { // skip images with no references if filtering by tag + continue + } + newImage.RepoDigests = []string{"@"} + newImage.RepoTags = []string{":"} + } else { + continue + } + } else if danglingOnly { + continue + } + + images = append(images, newImage) + } + + sort.Sort(sort.Reverse(byCreated(images))) + + return images, nil +} + +func newImage(image *image.Image, size int64) *types.Image { + newImage := new(types.Image) + newImage.ParentID = image.Parent.String() + newImage.ID = image.ID().String() + newImage.Created = image.Created.Unix() + newImage.Size = size + newImage.VirtualSize = size + if image.Config != nil { + newImage.Labels = image.Config.Labels + } + return newImage +} diff --git a/daemon/import.go b/daemon/import.go new file mode 100644 index 0000000000..ac7247ae7d --- /dev/null +++ b/daemon/import.go @@ -0,0 +1,111 @@ +package daemon + +import ( + "encoding/json" + 
"io" + "net/http" + "net/url" + "runtime" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/progressreader" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/runconfig" +) + +// ImportImage imports an image, getting the archived layer data either from +// inConfig (if src is "-"), or from a URI specified in src. Progress output is +// written to outStream. Repository and tag names can optionally be given in +// the repo and tag arguments, respectively. +func (daemon *Daemon) ImportImage(src string, newRef reference.Named, msg string, inConfig io.ReadCloser, outStream io.Writer, config *runconfig.Config) error { + var ( + sf = streamformatter.NewJSONStreamFormatter() + archive io.ReadCloser + resp *http.Response + ) + + if src == "-" { + archive = inConfig + } else { + inConfig.Close() + u, err := url.Parse(src) + if err != nil { + return err + } + if u.Scheme == "" { + u.Scheme = "http" + u.Host = src + u.Path = "" + } + outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) + resp, err = httputils.Download(u.String()) + if err != nil { + return err + } + progressReader := progressreader.New(progressreader.Config{ + In: resp.Body, + Out: outStream, + Formatter: sf, + Size: resp.ContentLength, + NewLines: true, + ID: "", + Action: "Importing", + }) + archive = progressReader + } + + defer archive.Close() + if len(msg) == 0 { + msg = "Imported from " + src + } + // TODO: support windows baselayer? 
+ l, err := daemon.layerStore.Register(archive, "") + if err != nil { + return err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + created := time.Now().UTC() + imgConfig, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: config, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Created: created, + Comment: msg, + }, + RootFS: &image.RootFS{ + Type: "layers", + DiffIDs: []layer.DiffID{l.DiffID()}, + }, + History: []image.History{{ + Created: created, + Comment: msg, + }}, + }) + if err != nil { + return err + } + + id, err := daemon.imageStore.Create(imgConfig) + if err != nil { + return err + } + + // FIXME: connect with commit code and call tagstore directly + if newRef != nil { + if err := daemon.TagImage(newRef, id.String(), true); err != nil { + return err + } + } + + outStream.Write(sf.FormatStatus("", id.String())) + daemon.EventsService.Log("import", id.String(), "") + return nil +} diff --git a/daemon/info.go b/daemon/info.go index 921a236acd..557f779935 100644 --- a/daemon/info.go +++ b/daemon/info.go @@ -62,7 +62,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) { v := &types.Info{ ID: daemon.ID, Containers: len(daemon.List()), - Images: len(daemon.Graph().Map()), + Images: len(daemon.imageStore.Map()), Driver: daemon.GraphDriver().String(), DriverStatus: daemon.GraphDriver().Status(), Plugins: daemon.showPluginsInfo(), diff --git a/daemon/inspect.go b/daemon/inspect.go index d17f2dca6c..9adea0608c 100644 --- a/daemon/inspect.go +++ b/daemon/inspect.go @@ -8,6 +8,7 @@ import ( "github.com/docker/docker/api/types/versions/v1p20" "github.com/docker/docker/daemon/exec" "github.com/docker/docker/daemon/network" + "github.com/docker/docker/layer" ) // ContainerInspect returns low-level information about a @@ -124,7 +125,7 @@ func (daemon *Daemon) getInspectData(container *Container, size bool) (*types.Co Path: container.Path, Args: container.Args, State: containerState, - 
Image: container.ImageID, + Image: container.ImageID.String(), LogPath: container.LogPath, Name: container.Name, RestartCount: container.RestartCount, @@ -149,7 +150,18 @@ func (daemon *Daemon) getInspectData(container *Container, size bool) (*types.Co contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) contJSONBase.GraphDriver.Name = container.Driver - graphDriverData, err := daemon.driver.GetMetadata(container.ID) + + image, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return nil, err + } + l, err := daemon.layerStore.Get(image.RootFS.ChainID()) + if err != nil { + return nil, err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + graphDriverData, err := l.Metadata() if err != nil { return nil, err } diff --git a/daemon/list.go b/daemon/list.go index 18152dc7aa..b64103a0b7 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -9,7 +9,6 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" derr "github.com/docker/docker/errors" - "github.com/docker/docker/graph" "github.com/docker/docker/image" "github.com/docker/docker/pkg/graphdb" "github.com/docker/docker/pkg/nat" @@ -66,7 +65,7 @@ type listContext struct { // names is a list of container names to filter with names map[string][]string // images is a list of images to filter with - images map[string]bool + images map[image.ID]bool // filters is a collection of arguments to filter with, specified by the user filters filters.Args // exitAllowed is a list of exit codes allowed to filter with @@ -176,25 +175,24 @@ func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error) } } - imagesFilter := map[string]bool{} + imagesFilter := map[image.ID]bool{} var ancestorFilter bool if ancestors, ok := psFilters["ancestor"]; ok { ancestorFilter = true - byParents := daemon.Graph().ByParent() // The idea is to walk the graph down the most "efficient" way. 
for _, ancestor := range ancestors { // First, get the imageId of the ancestor filter (yay) - image, err := daemon.repositories.LookupImage(ancestor) + id, err := daemon.GetImageID(ancestor) if err != nil { logrus.Warnf("Error while looking up for image %v", ancestor) continue } - if imagesFilter[ancestor] { + if imagesFilter[id] { // Already seen this ancestor, skip it continue } // Then walk down the graph and put the imageIds in imagesFilter - populateImageFilterByParents(imagesFilter, image.ID, byParents) + populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children) } } @@ -310,41 +308,29 @@ func includeContainerInList(container *Container, ctx *listContext) iterationAct return includeContainer } -func getImage(s *graph.TagStore, img, imgID string) (string, error) { - // both Image and ImageID is actually ids, nothing to guess - if strings.HasPrefix(imgID, img) { - return img, nil - } - id, err := s.GetID(img) - if err != nil { - if err == graph.ErrNameIsNotExist { - return imgID, nil - } - return "", err - } - if id != imgID { - return imgID, nil - } - return img, nil -} - // transformContainer generates the container type expected by the docker ps command. 
func (daemon *Daemon) transformContainer(container *Container, ctx *listContext) (*types.Container, error) { newC := &types.Container{ ID: container.ID, Names: ctx.names[container.ID], - ImageID: container.ImageID, + ImageID: container.ImageID.String(), } if newC.Names == nil { // Dead containers will often have no name, so make sure the response isn't null newC.Names = []string{} } - showImg, err := getImage(daemon.repositories, container.Config.Image, container.ImageID) - if err != nil { - return nil, err + image := container.Config.Image // if possible keep the original ref + if image != container.ImageID.String() { + id, err := daemon.GetImageID(image) + if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { + return nil, err + } + if err != nil || id != container.ImageID { + image = container.ImageID.String() + } } - newC.Image = showImg + newC.Image = image if len(container.Args) > 0 { args := []string{} @@ -433,12 +419,10 @@ func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, error) { return volumesOut, nil } -func populateImageFilterByParents(ancestorMap map[string]bool, imageID string, byParents map[string][]*image.Image) { +func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { if !ancestorMap[imageID] { - if images, ok := byParents[imageID]; ok { - for _, image := range images { - populateImageFilterByParents(ancestorMap, image.ID, byParents) - } + for _, id := range getChildren(imageID) { + populateImageFilterByParents(ancestorMap, id, getChildren) } ancestorMap[imageID] = true } diff --git a/errors/daemon.go b/errors/daemon.go index 1a13e74918..72ddf278df 100644 --- a/errors/daemon.go +++ b/errors/daemon.go @@ -832,15 +832,6 @@ var ( HTTPStatusCode: http.StatusInternalServerError, }) - // ErrorCodeRmInit is generated when we try to delete a container - // but failed deleting its init filesystem. 
- ErrorCodeRmInit = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "RMINIT", - Message: "Driver %s failed to remove init filesystem %s: %s", - Description: "While trying to delete a container, the driver failed to remove the init filesystem", - HTTPStatusCode: http.StatusInternalServerError, - }) - // ErrorCodeRmFS is generated when we try to delete a container // but failed deleting its filesystem. ErrorCodeRmFS = errcode.Register(errGroup, errcode.ErrorDescriptor{ diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go index 216c20f200..d092beb0bc 100644 --- a/integration-cli/docker_api_containers_test.go +++ b/integration-cli/docker_api_containers_test.go @@ -553,7 +553,7 @@ func (s *DockerSuite) TestContainerApiCommit(c *check.C) { cName := "testapicommit" dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") - name := "TestContainerApiCommit" + name := "testcontainerapicommit" status, b, err := sockRequest("POST", "/commit?repo="+name+"&testtag=tag&container="+cName, nil) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusCreated) @@ -586,7 +586,7 @@ func (s *DockerSuite) TestContainerApiCommitWithLabelInConfig(c *check.C) { "Labels": map[string]string{"key1": "value1", "key2": "value2"}, } - name := "TestContainerApiCommitWithConfig" + name := "testcontainerapicommitwithconfig" status, b, err := sockRequest("POST", "/commit?repo="+name+"&container="+cName, config) c.Assert(err, check.IsNil) c.Assert(status, check.Equals, http.StatusCreated) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 5b2ae74888..d4b31bd38f 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -4543,7 +4543,7 @@ func (s *DockerSuite) TestBuildInvalidTag(c *check.C) { _, out, err := buildImageWithOut(name, "FROM scratch\nMAINTAINER quux\n", true) // if the error doesnt check for 
illegal tag name, or the image is built // then this should fail - if !strings.Contains(out, "Illegal tag name") || strings.Contains(out, "Sending build context to Docker daemon") { + if !strings.Contains(out, "invalid reference format") || strings.Contains(out, "Sending build context to Docker daemon") { c.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out) } } @@ -6377,7 +6377,7 @@ func (s *DockerSuite) TestBuildTagEvent(c *check.C) { select { case ev := <-ch: c.Assert(ev.Status, check.Equals, "tag") - c.Assert(ev.ID, check.Equals, "test:") + c.Assert(ev.ID, check.Equals, "test:latest") case <-time.After(time.Second): c.Fatal("The 'tag' event not heard from the server") } diff --git a/integration-cli/docker_cli_by_digest_test.go b/integration-cli/docker_cli_by_digest_test.go index 2d00605627..75c8b4fd6f 100644 --- a/integration-cli/docker_cli_by_digest_test.go +++ b/integration-cli/docker_cli_by_digest_test.go @@ -3,6 +3,8 @@ package main import ( "encoding/json" "fmt" + "os" + "path/filepath" "regexp" "strings" @@ -11,7 +13,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/stringutils" - "github.com/docker/docker/utils" "github.com/go-check/check" ) @@ -32,7 +33,7 @@ func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) { dockerCmd(c, "run", "-d", "-e", "digest=1", "--name", containerName, "busybox") // tag the image to upload it to the private registry - repoAndTag := utils.ImageReference(repoName, tag) + repoAndTag := repoName + ":" + tag out, _, err := dockerCmdWithError("commit", containerName, repoAndTag) c.Assert(err, checker.IsNil, check.Commentf("image tagging failed: %s", out)) @@ -438,6 +439,11 @@ func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { // Now try pulling that image by digest. We should get an error about // digest verification for the target layer digest. 
+ // Remove distribution cache to force a re-pull of the blobs + if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { + c.Fatalf("error clearing distribution cache: %v", err) + } + // Pull from the registry using the @ reference. imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) out, exitStatus, _ := dockerCmdWithError("pull", imageReference) diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go index feb3fc055d..cac5b92d5c 100644 --- a/integration-cli/docker_cli_create_test.go +++ b/integration-cli/docker_cli_create_test.go @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/nat" + "github.com/docker/docker/pkg/stringid" "github.com/go-check/check" ) @@ -243,6 +244,41 @@ func (s *DockerSuite) TestCreateModeIpcContainer(c *check.C) { dockerCmd(c, "create", fmt.Sprintf("--ipc=container:%s", id), "busybox") } +func (s *DockerSuite) TestCreateByImageID(c *check.C) { + imageName := "testcreatebyimageid" + imageID, err := buildImage(imageName, + `FROM busybox + MAINTAINER dockerio`, + true) + if err != nil { + c.Fatal(err) + } + truncatedImageID := stringid.TruncateID(imageID) + + dockerCmd(c, "create", imageID) + dockerCmd(c, "create", truncatedImageID) + dockerCmd(c, "create", fmt.Sprintf("%s:%s", imageName, truncatedImageID)) + + // Ensure this fails + out, exit, _ := dockerCmdWithError("create", fmt.Sprintf("%s:%s", imageName, imageID)) + if exit == 0 { + c.Fatalf("expected non-zero exit code; received %d", exit) + } + + if expected := "invalid reference format"; !strings.Contains(out, expected) { + c.Fatalf(`Expected %q in output; got: %s`, expected, out) + } + + out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", truncatedImageID)) + if exit == 0 { + c.Fatalf("expected non-zero exit code; received %d", exit) + } + + if expected := "Unable to find image"; 
!strings.Contains(out, expected) { + c.Fatalf(`Expected %q in output; got: %s`, expected, out) + } +} + func (s *DockerTrustSuite) TestTrustedCreate(c *check.C) { repoName := s.setupTrustedImage(c, "trusted-create") diff --git a/integration-cli/docker_cli_external_graphdriver_unix_test.go b/integration-cli/docker_cli_external_graphdriver_unix_test.go index 4f87f60f08..3dcbe620ab 100644 --- a/integration-cli/docker_cli_external_graphdriver_unix_test.go +++ b/integration-cli/docker_cli_external_graphdriver_unix_test.go @@ -325,6 +325,8 @@ func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriver(c *check.C) { err = s.d.Stop() c.Assert(err, check.IsNil) + // Don't check s.ec.exists, because the daemon no longer calls the + // Exists function. c.Assert(s.ec.activations, check.Equals, 2) c.Assert(s.ec.init, check.Equals, 2) c.Assert(s.ec.creations >= 1, check.Equals, true) @@ -333,7 +335,6 @@ func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriver(c *check.C) { c.Assert(s.ec.puts >= 1, check.Equals, true) c.Assert(s.ec.stats, check.Equals, 3) c.Assert(s.ec.cleanups, check.Equals, 2) - c.Assert(s.ec.exists >= 1, check.Equals, true) c.Assert(s.ec.applydiff >= 1, check.Equals, true) c.Assert(s.ec.changes, check.Equals, 1) c.Assert(s.ec.diffsize, check.Equals, 0) diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go index 115b08a586..42e3e46283 100644 --- a/integration-cli/docker_cli_images_test.go +++ b/integration-cli/docker_cli_images_test.go @@ -98,9 +98,9 @@ func (s *DockerSuite) TestImagesFilterLabel(c *check.C) { out, _ := dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match") out = strings.TrimSpace(out) - c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w]*%s[\\s\\w]*", image1ID)) - c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w]*%s[\\s\\w]*", image2ID)) - c.Assert(out, check.Not(check.Matches), fmt.Sprintf("[\\s\\w]*%s[\\s\\w]*", image3ID)) + c.Assert(out, check.Matches, 
fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image1ID)) + c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image2ID)) + c.Assert(out, check.Not(check.Matches), fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image3ID)) out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match=me too") out = strings.TrimSpace(out) @@ -204,7 +204,7 @@ func (s *DockerSuite) TestImagesEnsureOnlyHeadsImagesShown(c *check.C) { // images shouldn't show non-heads images c.Assert(out, checker.Not(checker.Contains), intermediate) // images should contain final built images - c.Assert(out, checker.Contains, head[:12]) + c.Assert(out, checker.Contains, stringid.TruncateID(head)) } func (s *DockerSuite) TestImagesEnsureImagesFromScratchShown(c *check.C) { @@ -219,5 +219,5 @@ func (s *DockerSuite) TestImagesEnsureImagesFromScratchShown(c *check.C) { out, _ := dockerCmd(c, "images") // images should contain images built from scratch - c.Assert(out, checker.Contains, id[:12]) + c.Assert(out, checker.Contains, stringid.TruncateID(id)) } diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go index a035f1a15b..2e2c7b94ac 100644 --- a/integration-cli/docker_cli_inspect_test.go +++ b/integration-cli/docker_cli_inspect_test.go @@ -23,7 +23,12 @@ func checkValidGraphDriver(c *check.C, name string) { func (s *DockerSuite) TestInspectImage(c *check.C) { testRequires(c, DaemonIsLinux) imageTest := "emptyfs" - imageTestID := "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + // It is important that this ID remain stable. If a code change causes + // it to be different, this is equivalent to a cache bust when pulling + // a legacy-format manifest. If the check at the end of this function + // fails, fix the difference in the image serialization instead of + // updating this hash. 
+ imageTestID := "sha256:11f64303f0f7ffdc71f001788132bca5346831939a956e3e975c93267d89a16d" id, err := inspectField(imageTest, "Id") c.Assert(err, checker.IsNil) diff --git a/integration-cli/docker_cli_pull_local_test.go b/integration-cli/docker_cli_pull_local_test.go index 2d5374bef9..5771513eb9 100644 --- a/integration-cli/docker_cli_pull_local_test.go +++ b/integration-cli/docker_cli_pull_local_test.go @@ -159,3 +159,71 @@ func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { c.Assert(strings.TrimSpace(out), check.Equals, "/bin/sh -c echo "+repo, check.Commentf("CMD did not contain /bin/sh -c echo %s; %s", repo, out)) } } + +// TestPullIDStability verifies that pushing an image and pulling it back +// preserves the image ID. +func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) { + derivedImage := privateRegistryURL + "/dockercli/id-stability" + baseImage := "busybox" + + _, err := buildImage(derivedImage, fmt.Sprintf(` + FROM %s + ENV derived true + ENV asdf true + RUN dd if=/dev/zero of=/file bs=1024 count=1024 + CMD echo %s + `, baseImage, derivedImage), true) + if err != nil { + c.Fatal(err) + } + + originalID, err := getIDByName(derivedImage) + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + dockerCmd(c, "push", derivedImage) + + // Pull + out, _ := dockerCmd(c, "pull", derivedImage) + if strings.Contains(out, "Pull complete") { + c.Fatalf("repull redownloaded a layer: %s", out) + } + + derivedIDAfterPull, err := getIDByName(derivedImage) + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + + if derivedIDAfterPull != originalID { + c.Fatal("image's ID unexpectedly changed after a repush/repull") + } + + // Make sure the image runs correctly + out, _ = dockerCmd(c, "run", "--rm", derivedImage) + if strings.TrimSpace(out) != derivedImage { + c.Fatalf("expected %s; got %s", derivedImage, out) + } + + // Confirm that repushing and repulling does not change the computed ID + dockerCmd(c, "push", derivedImage) 
+ dockerCmd(c, "rmi", derivedImage) + dockerCmd(c, "pull", derivedImage) + + derivedIDAfterPull, err = getIDByName(derivedImage) + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + + if derivedIDAfterPull != originalID { + c.Fatal("image's ID unexpectedly changed after a repush/repull") + } + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + + // Make sure the image still runs + out, _ = dockerCmd(c, "run", "--rm", derivedImage) + if strings.TrimSpace(out) != derivedImage { + c.Fatalf("expected %s; got %s", derivedImage, out) + } +} diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go index ce74c7d7f2..4f5f7281fb 100644 --- a/integration-cli/docker_cli_pull_test.go +++ b/integration-cli/docker_cli_pull_test.go @@ -1,11 +1,7 @@ package main import ( - "encoding/json" "fmt" - "io/ioutil" - "os" - "path/filepath" "regexp" "strings" "time" @@ -46,19 +42,19 @@ func (s *DockerHubPullSuite) TestPullFromCentralRegistry(c *check.C) { func (s *DockerHubPullSuite) TestPullNonExistingImage(c *check.C) { testRequires(c, DaemonIsLinux) for _, e := range []struct { - Image string + Repo string Alias string }{ - {"library/asdfasdf:foobar", "asdfasdf:foobar"}, - {"library/asdfasdf:foobar", "library/asdfasdf:foobar"}, - {"library/asdfasdf:latest", "asdfasdf"}, - {"library/asdfasdf:latest", "asdfasdf:latest"}, - {"library/asdfasdf:latest", "library/asdfasdf"}, - {"library/asdfasdf:latest", "library/asdfasdf:latest"}, + {"library/asdfasdf", "asdfasdf:foobar"}, + {"library/asdfasdf", "library/asdfasdf:foobar"}, + {"library/asdfasdf", "asdfasdf"}, + {"library/asdfasdf", "asdfasdf:latest"}, + {"library/asdfasdf", "library/asdfasdf"}, + {"library/asdfasdf", "library/asdfasdf:latest"}, } { out, err := s.CmdWithError("pull", e.Alias) c.Assert(err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", out)) - c.Assert(out, checker.Contains, fmt.Sprintf("Error: image %s not found", 
e.Image), check.Commentf("expected image not found error messages")) + c.Assert(out, checker.Contains, fmt.Sprintf("Error: image %s not found", e.Repo), check.Commentf("expected image not found error messages")) } } @@ -163,254 +159,3 @@ func (s *DockerHubPullSuite) TestPullClientDisconnect(c *check.C) { time.Sleep(500 * time.Millisecond) } } - -type idAndParent struct { - ID string - Parent string -} - -func inspectImage(c *check.C, imageRef string) idAndParent { - out, _ := dockerCmd(c, "inspect", imageRef) - var inspectOutput []idAndParent - err := json.Unmarshal([]byte(out), &inspectOutput) - if err != nil { - c.Fatal(err) - } - - return inspectOutput[0] -} - -func imageID(c *check.C, imageRef string) string { - return inspectImage(c, imageRef).ID -} - -func imageParent(c *check.C, imageRef string) string { - return inspectImage(c, imageRef).Parent -} - -// TestPullMigration verifies that pulling an image based on layers -// that already exists locally will reuse those existing layers. 
-func (s *DockerRegistrySuite) TestPullMigration(c *check.C) { - repoName := privateRegistryURL + "/dockercli/migration" - - baseImage := repoName + ":base" - _, err := buildImage(baseImage, fmt.Sprintf(` - FROM scratch - ENV IMAGE base - CMD echo %s - `, baseImage), true) - if err != nil { - c.Fatal(err) - } - - baseIDBeforePush := imageID(c, baseImage) - baseParentBeforePush := imageParent(c, baseImage) - - derivedImage := repoName + ":derived" - _, err = buildImage(derivedImage, fmt.Sprintf(` - FROM %s - CMD echo %s - `, baseImage, derivedImage), true) - if err != nil { - c.Fatal(err) - } - - derivedIDBeforePush := imageID(c, derivedImage) - - dockerCmd(c, "push", derivedImage) - - // Remove derived image from the local store - dockerCmd(c, "rmi", derivedImage) - - // Repull - dockerCmd(c, "pull", derivedImage) - - // Check that the parent of this pulled image is the original base - // image - derivedIDAfterPull1 := imageID(c, derivedImage) - derivedParentAfterPull1 := imageParent(c, derivedImage) - - if derivedIDAfterPull1 == derivedIDBeforePush { - c.Fatal("image's ID should have changed on after deleting and pulling") - } - - if derivedParentAfterPull1 != baseIDBeforePush { - c.Fatalf("pulled image's parent ID (%s) does not match base image's ID (%s)", derivedParentAfterPull1, baseIDBeforePush) - } - - // Confirm that repushing and repulling does not change the computed ID - dockerCmd(c, "push", derivedImage) - dockerCmd(c, "rmi", derivedImage) - dockerCmd(c, "pull", derivedImage) - - derivedIDAfterPull2 := imageID(c, derivedImage) - derivedParentAfterPull2 := imageParent(c, derivedImage) - - if derivedIDAfterPull2 != derivedIDAfterPull1 { - c.Fatal("image's ID unexpectedly changed after a repush/repull") - } - - if derivedParentAfterPull2 != baseIDBeforePush { - c.Fatalf("pulled image's parent ID (%s) does not match base image's ID (%s)", derivedParentAfterPull2, baseIDBeforePush) - } - - // Remove everything, repull, and make sure everything uses computed 
IDs - dockerCmd(c, "rmi", baseImage, derivedImage) - dockerCmd(c, "pull", derivedImage) - - derivedIDAfterPull3 := imageID(c, derivedImage) - derivedParentAfterPull3 := imageParent(c, derivedImage) - derivedGrandparentAfterPull3 := imageParent(c, derivedParentAfterPull3) - - if derivedIDAfterPull3 != derivedIDAfterPull1 { - c.Fatal("image's ID unexpectedly changed after a second repull") - } - - if derivedParentAfterPull3 == baseIDBeforePush { - c.Fatalf("pulled image's parent ID (%s) should not match base image's original ID (%s)", derivedParentAfterPull3, derivedIDBeforePush) - } - - if derivedGrandparentAfterPull3 == baseParentBeforePush { - c.Fatal("base image's parent ID should have been rewritten on pull") - } -} - -// TestPullMigrationRun verifies that pulling an image based on layers -// that already exists locally will result in an image that runs properly. -func (s *DockerRegistrySuite) TestPullMigrationRun(c *check.C) { - type idAndParent struct { - ID string - Parent string - } - - derivedImage := privateRegistryURL + "/dockercli/migration-run" - baseImage := "busybox" - - _, err := buildImage(derivedImage, fmt.Sprintf(` - FROM %s - RUN dd if=/dev/zero of=/file bs=1024 count=1024 - CMD echo %s - `, baseImage, derivedImage), true) - if err != nil { - c.Fatal(err) - } - - baseIDBeforePush := imageID(c, baseImage) - derivedIDBeforePush := imageID(c, derivedImage) - - dockerCmd(c, "push", derivedImage) - - // Remove derived image from the local store - dockerCmd(c, "rmi", derivedImage) - - // Repull - dockerCmd(c, "pull", derivedImage) - - // Check that this pulled image is based on the original base image - derivedIDAfterPull1 := imageID(c, derivedImage) - derivedParentAfterPull1 := imageParent(c, imageParent(c, derivedImage)) - - if derivedIDAfterPull1 == derivedIDBeforePush { - c.Fatal("image's ID should have changed on after deleting and pulling") - } - - if derivedParentAfterPull1 != baseIDBeforePush { - c.Fatalf("pulled image's parent ID (%s) does not 
match base image's ID (%s)", derivedParentAfterPull1, baseIDBeforePush) - } - - // Make sure the image runs correctly - out, _ := dockerCmd(c, "run", "--rm", derivedImage) - if strings.TrimSpace(out) != derivedImage { - c.Fatalf("expected %s; got %s", derivedImage, out) - } - - // Confirm that repushing and repulling does not change the computed ID - dockerCmd(c, "push", derivedImage) - dockerCmd(c, "rmi", derivedImage) - dockerCmd(c, "pull", derivedImage) - - derivedIDAfterPull2 := imageID(c, derivedImage) - derivedParentAfterPull2 := imageParent(c, imageParent(c, derivedImage)) - - if derivedIDAfterPull2 != derivedIDAfterPull1 { - c.Fatal("image's ID unexpectedly changed after a repush/repull") - } - - if derivedParentAfterPull2 != baseIDBeforePush { - c.Fatalf("pulled image's parent ID (%s) does not match base image's ID (%s)", derivedParentAfterPull2, baseIDBeforePush) - } - - // Make sure the image still runs - out, _ = dockerCmd(c, "run", "--rm", derivedImage) - if strings.TrimSpace(out) != derivedImage { - c.Fatalf("expected %s; got %s", derivedImage, out) - } -} - -// TestPullConflict provides coverage of the situation where a computed -// strongID conflicts with some unverifiable data in the graph. 
-func (s *DockerRegistrySuite) TestPullConflict(c *check.C) { - repoName := privateRegistryURL + "/dockercli/conflict" - - _, err := buildImage(repoName, ` - FROM scratch - ENV IMAGE conflict - CMD echo conflict - `, true) - if err != nil { - c.Fatal(err) - } - - dockerCmd(c, "push", repoName) - - // Pull to make it content-addressable - dockerCmd(c, "rmi", repoName) - dockerCmd(c, "pull", repoName) - - IDBeforeLoad := imageID(c, repoName) - - // Load/save to turn this into an unverified image with the same ID - tmpDir, err := ioutil.TempDir("", "conflict-save-output") - if err != nil { - c.Errorf("failed to create temporary directory: %s", err) - } - defer os.RemoveAll(tmpDir) - - tarFile := filepath.Join(tmpDir, "repo.tar") - - dockerCmd(c, "save", "-o", tarFile, repoName) - dockerCmd(c, "rmi", repoName) - dockerCmd(c, "load", "-i", tarFile) - - // Check that the the ID is the same after save/load. - IDAfterLoad := imageID(c, repoName) - - if IDAfterLoad != IDBeforeLoad { - c.Fatal("image's ID should be the same after save/load") - } - - // Repull - dockerCmd(c, "pull", repoName) - - // Check that the ID is now different because of the conflict. - IDAfterPull1 := imageID(c, repoName) - - // Expect the new ID to be SHA256(oldID) - expectedIDDigest, err := digest.FromBytes([]byte(IDBeforeLoad)) - if err != nil { - c.Fatalf("digest error: %v", err) - } - expectedID := expectedIDDigest.Hex() - if IDAfterPull1 != expectedID { - c.Fatalf("image's ID should have changed on pull to %s (got %s)", expectedID, IDAfterPull1) - } - - // A second pull should use the new ID again. 
- dockerCmd(c, "pull", repoName) - - IDAfterPull2 := imageID(c, repoName) - - if IDAfterPull2 != IDAfterPull1 { - c.Fatal("image's ID unexpectedly changed after a repull") - } -} diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go index 7c4e5c9428..71ebed7d84 100644 --- a/integration-cli/docker_cli_push_test.go +++ b/integration-cli/docker_cli_push_test.go @@ -2,16 +2,13 @@ package main import ( "archive/tar" - "encoding/json" "fmt" "io/ioutil" "os" "os/exec" - "path/filepath" "strings" "time" - "github.com/docker/docker/image" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) @@ -86,46 +83,6 @@ func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) { } } -// TestPushBadParentChain tries to push an image with a corrupted parent chain -// in the v1compatibility files, and makes sure the push process fixes it. -func (s *DockerRegistrySuite) TestPushBadParentChain(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/badparent", privateRegistryURL) - - id, err := buildImage(repoName, ` - FROM busybox - CMD echo "adding another layer" - `, true) - if err != nil { - c.Fatal(err) - } - - // Push to create v1compatibility file - dockerCmd(c, "push", repoName) - - // Corrupt the parent in the v1compatibility file from the top layer - filename := filepath.Join(dockerBasePath, "graph", id, "v1Compatibility") - - jsonBytes, err := ioutil.ReadFile(filename) - c.Assert(err, check.IsNil, check.Commentf("Could not read v1Compatibility file: %s", err)) - - var img image.Image - err = json.Unmarshal(jsonBytes, &img) - c.Assert(err, check.IsNil, check.Commentf("Could not unmarshal json: %s", err)) - - img.Parent = "1234123412341234123412341234123412341234123412341234123412341234" - - jsonBytes, err = json.Marshal(&img) - c.Assert(err, check.IsNil, check.Commentf("Could not marshal json: %s", err)) - - err = ioutil.WriteFile(filename, jsonBytes, 0600) - c.Assert(err, check.IsNil, check.Commentf("Could 
not write v1Compatibility file: %s", err)) - - dockerCmd(c, "push", repoName) - - // pull should succeed - dockerCmd(c, "pull", repoName) -} - func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL) emptyTarball, err := ioutil.TempFile("", "empty_tarball") diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go index 89bb9db10a..13fa08ef64 100644 --- a/integration-cli/docker_cli_rmi_test.go +++ b/integration-cli/docker_cli_rmi_test.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" "github.com/go-check/check" ) @@ -85,7 +86,7 @@ func (s *DockerSuite) TestRmiImgIDMultipleTag(c *check.C) { // first checkout without force it fails out, _, err = dockerCmdWithError("rmi", imgID) - expected := fmt.Sprintf("conflict: unable to delete %s (cannot be forced) - image is being used by running container %s", imgID[:12], containerID[:12]) + expected := fmt.Sprintf("conflict: unable to delete %s (cannot be forced) - image is being used by running container %s", stringid.TruncateID(imgID), stringid.TruncateID(containerID)) // rmi tagged in multiple repos should have failed without force c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, expected) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 8ec54e5a2b..546ad4d516 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -3749,3 +3749,15 @@ func (s *DockerSuite) TestDockerFails(c *check.C) { c.Fatalf("Docker run with flag not defined should exit with 125, but we got out: %s, exit: %d, err: %s", out, exit, err) } } + +// TestRunInvalidReference invokes docker run with a bad reference. 
+func (s *DockerSuite) TestRunInvalidReference(c *check.C) { + out, exit, _ := dockerCmdWithError("run", "busybox@foo") + if exit == 0 { + c.Fatalf("expected non-zero exit code; received %d", exit) + } + + if !strings.Contains(out, "invalid reference format") { + c.Fatalf(`Expected "invalid reference format" in output; got: %s`, out) + } +} diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go index 673d3a94a8..b81a03319e 100644 --- a/integration-cli/docker_cli_save_load_test.go +++ b/integration-cli/docker_cli_save_load_test.go @@ -8,10 +8,12 @@ import ( "os/exec" "path/filepath" "reflect" + "regexp" "sort" "strings" "time" + "github.com/docker/distribution/digest" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) @@ -100,7 +102,7 @@ func (s *DockerSuite) TestSaveCheckTimes(c *check.C) { out, _, err = runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName), exec.Command("tar", "tv"), - exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), data[0].ID))) + exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Hex()))) c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err)) } @@ -110,7 +112,7 @@ func (s *DockerSuite) TestSaveImageId(c *check.C) { dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v:latest", repoName)) out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName) - cleanedLongImageID := strings.TrimSpace(out) + cleanedLongImageID := strings.TrimPrefix(strings.TrimSpace(out), "sha256:") out, _ = dockerCmd(c, "images", "-q", repoName) cleanedShortImageID := strings.TrimSpace(out) @@ -207,20 +209,30 @@ func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) { // create the archive out, _, err := runCommandPipelineWithOutput( - exec.Command(dockerBinary, "save", repoName), - 
exec.Command("tar", "t"), - exec.Command("grep", "VERSION"), - exec.Command("cut", "-d", "/", "-f1")) + exec.Command(dockerBinary, "save", repoName, "busybox:latest"), + exec.Command("tar", "t")) c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple images: %s, %v", out, err)) - actual := strings.Split(strings.TrimSpace(out), "\n") + + lines := strings.Split(strings.TrimSpace(out), "\n") + var actual []string + for _, l := range lines { + if regexp.MustCompile("^[a-f0-9]{64}\\.json$").Match([]byte(l)) { + actual = append(actual, strings.TrimSuffix(l, ".json")) + } + } // make the list of expected layers - out, _ = dockerCmd(c, "history", "-q", "--no-trunc", "busybox:latest") - expected := append(strings.Split(strings.TrimSpace(out), "\n"), idFoo, idBar) + out, _ = dockerCmd(c, "inspect", "-f", "{{.Id}}", "busybox:latest") + expected := []string{strings.TrimSpace(out), idFoo, idBar} + + // prefixes are not in tar + for i := range expected { + expected[i] = digest.Digest(expected[i]).Hex() + } sort.Strings(actual) sort.Strings(expected) - c.Assert(actual, checker.DeepEquals, expected, check.Commentf("archive does not contains the right layers: got %v, expected %v", actual, expected)) + c.Assert(actual, checker.DeepEquals, expected, check.Commentf("archive does not contains the right layers: got %v, expected %v, output: %q", actual, expected, out)) } // Issue #6722 #5892 ensure directories are included in changes diff --git a/integration-cli/docker_cli_save_load_unix_test.go b/integration-cli/docker_cli_save_load_unix_test.go index 185c1a7162..cef7d43079 100644 --- a/integration-cli/docker_cli_save_load_unix_test.go +++ b/integration-cli/docker_cli_save_load_unix_test.go @@ -18,9 +18,7 @@ func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { dockerCmd(c, "run", "--name", name, "busybox", "true") repoName := "foobar-save-load-test" - out, _ := dockerCmd(c, "commit", name, repoName) - - before, _ := dockerCmd(c, "inspect", repoName) + before, _ := 
dockerCmd(c, "commit", name, repoName) tmpFile, err := ioutil.TempFile("", "foobar-save-load-test.tar") c.Assert(err, check.IsNil) @@ -40,10 +38,10 @@ func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { loadCmd := exec.Command(dockerBinary, "load") loadCmd.Stdin = tmpFile - out, _, err = runCommandWithOutput(loadCmd) + out, _, err := runCommandWithOutput(loadCmd) c.Assert(err, check.IsNil, check.Commentf(out)) - after, _ := dockerCmd(c, "inspect", repoName) + after, _ := dockerCmd(c, "inspect", "-f", "{{.Id}}", repoName) c.Assert(before, check.Equals, after) //inspect is not the same after a save / load diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go index cd0ca4126d..aa81217492 100644 --- a/integration-cli/docker_cli_tag_test.go +++ b/integration-cli/docker_cli_tag_test.go @@ -1,9 +1,11 @@ package main import ( + "fmt" "strings" "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/stringutils" "github.com/go-check/check" ) @@ -111,7 +113,7 @@ func (s *DockerSuite) TestTagWithPrefixHyphen(c *check.C) { // test index name begin with '-' out, _, err = dockerCmdWithError("tag", "busybox:latest", "-index:5000/busybox:test") c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Invalid index name (-index:5000). 
Cannot begin or end with a hyphen", check.Commentf("tag a name begin with '-' should failed")) + c.Assert(out, checker.Contains, "invalid reference format", check.Commentf("tag a name begin with '-' should failed")) } // ensure tagging using official names works @@ -171,3 +173,57 @@ func (s *DockerSuite) TestTagMatchesDigest(c *check.C) { c.Fatal("inspecting by digest should have failed") } } + +func (s *DockerSuite) TestTagInvalidRepoName(c *check.C) { + testRequires(c, DaemonIsLinux) + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + + // test setting tag fails + _, _, err := dockerCmdWithError("tag", "-f", "busybox:latest", "sha256:sometag") + if err == nil { + c.Fatal("tagging with image named \"sha256\" should have failed") + } +} + +// ensure tags cannot create ambiguity with image ids +func (s *DockerSuite) TestTagTruncationAmbiguity(c *check.C) { + testRequires(c, DaemonIsLinux) + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + + imageID, err := buildImage("notbusybox:latest", + `FROM busybox + MAINTAINER dockerio`, + true) + if err != nil { + c.Fatal(err) + } + truncatedImageID := stringid.TruncateID(imageID) + truncatedTag := fmt.Sprintf("notbusybox:%s", truncatedImageID) + + id, err := inspectField(truncatedTag, "Id") + if err != nil { + c.Fatalf("Error inspecting by image id: %s", err) + } + + // Ensure inspect by image id returns image for image id + c.Assert(id, checker.Equals, imageID) + c.Logf("Built image: %s", imageID) + + // test setting tag fails + _, _, err = dockerCmdWithError("tag", "-f", "busybox:latest", truncatedTag) + if err != nil { + c.Fatalf("Error tagging with an image id: %s", err) + } + + id, err = inspectField(truncatedTag, "Id") + if err != nil { + c.Fatalf("Error inspecting by image id: %s", err) + } + + // Ensure id is imageID 
and not busybox:latest + c.Assert(id, checker.Not(checker.Equals), imageID) +} diff --git a/pkg/parsers/parsers.go b/pkg/parsers/parsers.go index 59fea5cd9e..51de847b16 100644 --- a/pkg/parsers/parsers.go +++ b/pkg/parsers/parsers.go @@ -110,26 +110,6 @@ func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil } -// ParseRepositoryTag gets a repos name and returns the right reposName + tag|digest -// The tag can be confusing because of a port in a repository name. -// Ex: localhost.localdomain:5000/samalba/hipache:latest -// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb -func ParseRepositoryTag(repos string) (string, string) { - n := strings.Index(repos, "@") - if n >= 0 { - parts := strings.Split(repos, "@") - return parts[0], parts[1] - } - n = strings.LastIndex(repos, ":") - if n < 0 { - return repos, "" - } - if tag := repos[n+1:]; !strings.Contains(tag, "/") { - return repos[:n], tag - } - return repos, "" -} - // PartParser parses and validates the specified string (data) using the specified template // e.g. 
ip:public:private -> 192.168.0.1:80:8000 func PartParser(template, data string) (map[string]string, error) { diff --git a/pkg/parsers/parsers_test.go b/pkg/parsers/parsers_test.go index 47b45281c7..db538d583e 100644 --- a/pkg/parsers/parsers_test.go +++ b/pkg/parsers/parsers_test.go @@ -120,36 +120,6 @@ func TestParseInvalidUnixAddrInvalid(t *testing.T) { } } -func TestParseRepositoryTag(t *testing.T) { - if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) - } - if repo, digest := ParseRepositoryTag("root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "root" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { - t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "root", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) - } - if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) - } - if repo, digest := ParseRepositoryTag("user/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "user/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { - t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "user/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) - } - if repo, tag := ParseRepositoryTag("url:5000/repo"); 
repo != "url:5000/repo" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) - } - if repo, digest := ParseRepositoryTag("url:5000/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "url:5000/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { - t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "url:5000/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) - } -} - func TestParseKeyValueOpt(t *testing.T) { invalids := map[string]string{ "": "Unable to parse key/value option: ", diff --git a/pkg/stringid/stringid.go b/pkg/stringid/stringid.go index 266a74ba6d..0332da6676 100644 --- a/pkg/stringid/stringid.go +++ b/pkg/stringid/stringid.go @@ -7,6 +7,7 @@ import ( "io" "regexp" "strconv" + "strings" "github.com/docker/docker/pkg/random" ) @@ -25,6 +26,9 @@ func IsShortID(id string) bool { // In case of a collision a lookup with TruncIndex.Get() will fail, and the caller // will need to use a langer prefix, or the full-length Id. 
func TruncateID(id string) string { + if i := strings.IndexRune(id, ':'); i >= 0 { + id = id[i+1:] + } trimTo := shortLen if len(id) < shortLen { trimTo = len(id) diff --git a/registry/config.go b/registry/config.go index 7cac715840..8d7962f8d4 100644 --- a/registry/config.go +++ b/registry/config.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/docker/distribution/reference" - "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" ) @@ -216,18 +216,15 @@ func ValidateIndexName(val string) (string, error) { return val, nil } -func validateRemoteName(remoteName string) error { - - if !strings.Contains(remoteName, "/") { - +func validateRemoteName(remoteName reference.Named) error { + remoteNameStr := remoteName.Name() + if !strings.Contains(remoteNameStr, "/") { // the repository name must not be a valid image ID - if err := image.ValidateID(remoteName); err == nil { + if err := v1.ValidateID(remoteNameStr); err == nil { return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", remoteName) } } - - _, err := reference.WithName(remoteName) - return err + return nil } func validateNoSchema(reposName string) error { @@ -239,27 +236,24 @@ func validateNoSchema(reposName string) error { } // ValidateRepositoryName validates a repository name -func ValidateRepositoryName(reposName string) error { - _, _, err := loadRepositoryName(reposName, true) +func ValidateRepositoryName(reposName reference.Named) error { + _, _, err := loadRepositoryName(reposName) return err } // loadRepositoryName returns the repo name splitted into index name // and remote repo name. It returns an error if the name is not valid. 
-func loadRepositoryName(reposName string, checkRemoteName bool) (string, string, error) { - if err := validateNoSchema(reposName); err != nil { - return "", "", err +func loadRepositoryName(reposName reference.Named) (string, reference.Named, error) { + if err := validateNoSchema(reposName.Name()); err != nil { + return "", nil, err } - indexName, remoteName := splitReposName(reposName) + indexName, remoteName, err := splitReposName(reposName) - var err error if indexName, err = ValidateIndexName(indexName); err != nil { - return "", "", err + return "", nil, err } - if checkRemoteName { - if err = validateRemoteName(remoteName); err != nil { - return "", "", err - } + if err = validateRemoteName(remoteName); err != nil { + return "", nil, err } return indexName, remoteName, nil } @@ -297,31 +291,36 @@ func (index *IndexInfo) GetAuthConfigKey() string { } // splitReposName breaks a reposName into an index name and remote name -func splitReposName(reposName string) (string, string) { - nameParts := strings.SplitN(reposName, "/", 2) - var indexName, remoteName string - if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && - !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { +func splitReposName(reposName reference.Named) (indexName string, remoteName reference.Named, err error) { + var remoteNameStr string + indexName, remoteNameStr = reference.SplitHostname(reposName) + if indexName == "" || (!strings.Contains(indexName, ".") && + !strings.Contains(indexName, ":") && indexName != "localhost") { // This is a Docker Index repos (ex: samalba/hipache or ubuntu) // 'docker.io' indexName = IndexName remoteName = reposName } else { - indexName = nameParts[0] - remoteName = nameParts[1] + remoteName, err = reference.WithName(remoteNameStr) } - return indexName, remoteName + return } // NewRepositoryInfo validates and breaks down a repository name into a RepositoryInfo -func (config *ServiceConfig) NewRepositoryInfo(reposName string, 
bySearch bool) (*RepositoryInfo, error) { - indexName, remoteName, err := loadRepositoryName(reposName, !bySearch) - if err != nil { +func (config *ServiceConfig) NewRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { + if err := validateNoSchema(reposName.Name()); err != nil { return nil, err } - repoInfo := &RepositoryInfo{ - RemoteName: remoteName, + repoInfo := &RepositoryInfo{} + var ( + indexName string + err error + ) + + indexName, repoInfo.RemoteName, err = loadRepositoryName(reposName) + if err != nil { + return nil, err } repoInfo.Index, err = config.NewIndexInfo(indexName) @@ -330,46 +329,47 @@ func (config *ServiceConfig) NewRepositoryInfo(reposName string, bySearch bool) } if repoInfo.Index.Official { - normalizedName := normalizeLibraryRepoName(repoInfo.RemoteName) + repoInfo.LocalName, err = normalizeLibraryRepoName(repoInfo.RemoteName) + if err != nil { + return nil, err + } + repoInfo.RemoteName = repoInfo.LocalName - repoInfo.LocalName = normalizedName - repoInfo.RemoteName = normalizedName // If the normalized name does not contain a '/' (e.g. "foo") // then it is an official repo. - if strings.IndexRune(normalizedName, '/') == -1 { + if strings.IndexRune(repoInfo.RemoteName.Name(), '/') == -1 { repoInfo.Official = true // Fix up remote name for official repos. 
- repoInfo.RemoteName = "library/" + normalizedName + repoInfo.RemoteName, err = reference.WithName("library/" + repoInfo.RemoteName.Name()) + if err != nil { + return nil, err + } } - repoInfo.CanonicalName = "docker.io/" + repoInfo.RemoteName + repoInfo.CanonicalName, err = reference.WithName("docker.io/" + repoInfo.RemoteName.Name()) + if err != nil { + return nil, err + } } else { - repoInfo.LocalName = localNameFromRemote(repoInfo.Index.Name, repoInfo.RemoteName) + repoInfo.LocalName, err = localNameFromRemote(repoInfo.Index.Name, repoInfo.RemoteName) + if err != nil { + return nil, err + } repoInfo.CanonicalName = repoInfo.LocalName - } return repoInfo, nil } -// GetSearchTerm special-cases using local name for official index, and -// remote name for private indexes. -func (repoInfo *RepositoryInfo) GetSearchTerm() string { - if repoInfo.Index.Official { - return repoInfo.LocalName - } - return repoInfo.RemoteName -} - // ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but // lacks registry configuration. -func ParseRepositoryInfo(reposName string) (*RepositoryInfo, error) { - return emptyServiceConfig.NewRepositoryInfo(reposName, false) +func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { + return emptyServiceConfig.NewRepositoryInfo(reposName) } -// ParseIndexInfo will use repository name to get back an indexInfo. -func ParseIndexInfo(reposName string) (*IndexInfo, error) { - indexName, _ := splitReposName(reposName) +// ParseSearchIndexInfo will use repository name to get back an indexInfo. 
+func ParseSearchIndexInfo(reposName string) (*IndexInfo, error) { + indexName, _ := splitReposSearchTerm(reposName) indexInfo, err := emptyServiceConfig.NewIndexInfo(indexName) if err != nil { @@ -378,12 +378,12 @@ func ParseIndexInfo(reposName string) (*IndexInfo, error) { return indexInfo, nil } -// NormalizeLocalName transforms a repository name into a normalize LocalName +// NormalizeLocalName transforms a repository name into a normalized LocalName // Passes through the name without transformation on error (image id, etc) // It does not use the repository info because we don't want to load // the repository index and do request over the network. -func NormalizeLocalName(name string) string { - indexName, remoteName, err := loadRepositoryName(name, true) +func NormalizeLocalName(name reference.Named) reference.Named { + indexName, remoteName, err := loadRepositoryName(name) if err != nil { return name } @@ -395,23 +395,52 @@ func NormalizeLocalName(name string) string { } if officialIndex { - return normalizeLibraryRepoName(remoteName) + localName, err := normalizeLibraryRepoName(remoteName) + if err != nil { + return name + } + return localName } - return localNameFromRemote(indexName, remoteName) + localName, err := localNameFromRemote(indexName, remoteName) + if err != nil { + return name + } + return localName } // normalizeLibraryRepoName removes the library prefix from // the repository name for official repos. -func normalizeLibraryRepoName(name string) string { - if strings.HasPrefix(name, "library/") { +func normalizeLibraryRepoName(name reference.Named) (reference.Named, error) { + if strings.HasPrefix(name.Name(), "library/") { // If pull "library/foo", it's stored locally under "foo" - name = strings.SplitN(name, "/", 2)[1] + return reference.WithName(strings.SplitN(name.Name(), "/", 2)[1]) } - return name + return name, nil } // localNameFromRemote combines the index name and the repo remote name // to generate a repo local name. 
-func localNameFromRemote(indexName, remoteName string) string { - return indexName + "/" + remoteName +func localNameFromRemote(indexName string, remoteName reference.Named) (reference.Named, error) { + return reference.WithName(indexName + "/" + remoteName.Name()) +} + +// NormalizeLocalReference transforms a reference to use a normalized LocalName +// for the name portion. Passes through the reference without transformation on +// error. +func NormalizeLocalReference(ref reference.Named) reference.Named { + localName := NormalizeLocalName(ref) + if tagged, isTagged := ref.(reference.Tagged); isTagged { + newRef, err := reference.WithTag(localName, tagged.Tag()) + if err != nil { + return ref + } + return newRef + } else if digested, isDigested := ref.(reference.Digested); isDigested { + newRef, err := reference.WithDigest(localName, digested.Digest()) + if err != nil { + return ref + } + return newRef + } + return localName } diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go index fb19e577dd..3c75dea6d8 100644 --- a/registry/registry_mock_test.go +++ b/registry/registry_mock_test.go @@ -15,6 +15,7 @@ import ( "testing" "time" + "github.com/docker/distribution/reference" "github.com/docker/docker/opts" "github.com/gorilla/mux" @@ -349,15 +350,19 @@ func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } - repositoryName := mux.Vars(r)["repository"] + repositoryName, err := reference.WithName(mux.Vars(r)["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } repositoryName = NormalizeLocalName(repositoryName) - tags, exists := testRepositories[repositoryName] + tags, exists := testRepositories[repositoryName.String()] if !exists { apiError(w, "Repository not found", 404) return } if r.Method == "DELETE" { - delete(testRepositories, repositoryName) + delete(testRepositories, repositoryName.String()) writeResponse(w, true, 200) return } @@ -369,10 +374,14
@@ func handlerGetTag(w http.ResponseWriter, r *http.Request) { return } vars := mux.Vars(r) - repositoryName := vars["repository"] + repositoryName, err := reference.WithName(vars["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } repositoryName = NormalizeLocalName(repositoryName) tagName := vars["tag"] - tags, exists := testRepositories[repositoryName] + tags, exists := testRepositories[repositoryName.String()] if !exists { apiError(w, "Repository not found", 404) return @@ -390,13 +399,17 @@ func handlerPutTag(w http.ResponseWriter, r *http.Request) { return } vars := mux.Vars(r) - repositoryName := vars["repository"] + repositoryName, err := reference.WithName(vars["repository"]) + if err != nil { + apiError(w, "Could not parse repository", 400) + return + } repositoryName = NormalizeLocalName(repositoryName) tagName := vars["tag"] - tags, exists := testRepositories[repositoryName] + tags, exists := testRepositories[repositoryName.String()] if !exists { - tags := make(map[string]string) - testRepositories[repositoryName] = tags + tags = make(map[string]string) + testRepositories[repositoryName.String()] = tags } tagValue := "" readJSON(r, tagValue) diff --git a/registry/registry_test.go b/registry/registry_test.go index 7714310d93..2bc1edff73 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -8,6 +8,7 @@ import ( "strings" "testing" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/cliconfig" ) @@ -214,13 +215,21 @@ func TestGetRemoteImageLayer(t *testing.T) { func TestGetRemoteTag(t *testing.T) { r := spawnTestRegistrySession(t) - tag, err := r.GetRemoteTag([]string{makeURL("/v1/")}, REPO, "test") + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + tag, err := r.GetRemoteTag([]string{makeURL("/v1/")}, repoRef, "test") if err != nil { t.Fatal(err) } assertEqual(t, tag, imageID, 
"Expected tag test to map to "+imageID) - _, err = r.GetRemoteTag([]string{makeURL("/v1/")}, "foo42/baz", "foo") + bazRef, err := reference.ParseNamed("foo42/baz") + if err != nil { + t.Fatal(err) + } + _, err = r.GetRemoteTag([]string{makeURL("/v1/")}, bazRef, "foo") if err != ErrRepoNotFound { t.Fatal("Expected ErrRepoNotFound error when fetching tag for bogus repo") } @@ -228,7 +237,11 @@ func TestGetRemoteTag(t *testing.T) { func TestGetRemoteTags(t *testing.T) { r := spawnTestRegistrySession(t) - tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO) + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, repoRef) if err != nil { t.Fatal(err) } @@ -236,7 +249,11 @@ func TestGetRemoteTags(t *testing.T) { assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID) assertEqual(t, tags["test"], imageID, "Expected tag test to map to "+imageID) - _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz") + bazRef, err := reference.ParseNamed("foo42/baz") + if err != nil { + t.Fatal(err) + } + _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, bazRef) if err != ErrRepoNotFound { t.Fatal("Expected ErrRepoNotFound error when fetching tags for bogus repo") } @@ -249,7 +266,11 @@ func TestGetRepositoryData(t *testing.T) { t.Fatal(err) } host := "http://" + parsedURL.Host + "/v1/" - data, err := r.GetRepositoryData("foo42/bar") + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + data, err := r.GetRepositoryData(repoRef) if err != nil { t.Fatal(err) } @@ -315,29 +336,41 @@ func TestValidateRepositoryName(t *testing.T) { } for _, name := range invalidRepoNames { - err := ValidateRepositoryName(name) - assertNotEqual(t, err, nil, "Expected invalid repo name: "+name) + named, err := reference.WithName(name) + if err == nil { + err := ValidateRepositoryName(named) + assertNotEqual(t, err, nil, "Expected invalid repo name: 
"+name) + } } for _, name := range validRepoNames { - err := ValidateRepositoryName(name) + named, err := reference.WithName(name) + if err != nil { + t.Fatalf("could not parse valid name: %s", name) + } + err = ValidateRepositoryName(named) assertEqual(t, err, nil, "Expected valid repo name: "+name) } - - err := ValidateRepositoryName(invalidRepoNames[0]) - assertEqual(t, err, ErrInvalidRepositoryName, "Expected ErrInvalidRepositoryName: "+invalidRepoNames[0]) } func TestParseRepositoryInfo(t *testing.T) { + withName := func(name string) reference.Named { + named, err := reference.WithName(name) + if err != nil { + t.Fatalf("could not parse reference %s", name) + } + return named + } + expectedRepoInfos := map[string]RepositoryInfo{ "fooo/bar": { Index: &IndexInfo{ Name: IndexName, Official: true, }, - RemoteName: "fooo/bar", - LocalName: "fooo/bar", - CanonicalName: "docker.io/fooo/bar", + RemoteName: withName("fooo/bar"), + LocalName: withName("fooo/bar"), + CanonicalName: withName("docker.io/fooo/bar"), Official: false, }, "library/ubuntu": { @@ -345,9 +378,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: IndexName, Official: true, }, - RemoteName: "library/ubuntu", - LocalName: "ubuntu", - CanonicalName: "docker.io/library/ubuntu", + RemoteName: withName("library/ubuntu"), + LocalName: withName("ubuntu"), + CanonicalName: withName("docker.io/library/ubuntu"), Official: true, }, "nonlibrary/ubuntu": { @@ -355,9 +388,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: IndexName, Official: true, }, - RemoteName: "nonlibrary/ubuntu", - LocalName: "nonlibrary/ubuntu", - CanonicalName: "docker.io/nonlibrary/ubuntu", + RemoteName: withName("nonlibrary/ubuntu"), + LocalName: withName("nonlibrary/ubuntu"), + CanonicalName: withName("docker.io/nonlibrary/ubuntu"), Official: false, }, "ubuntu": { @@ -365,9 +398,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: IndexName, Official: true, }, - RemoteName: "library/ubuntu", - LocalName: "ubuntu", - 
CanonicalName: "docker.io/library/ubuntu", + RemoteName: withName("library/ubuntu"), + LocalName: withName("ubuntu"), + CanonicalName: withName("docker.io/library/ubuntu"), Official: true, }, "other/library": { @@ -375,9 +408,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: IndexName, Official: true, }, - RemoteName: "other/library", - LocalName: "other/library", - CanonicalName: "docker.io/other/library", + RemoteName: withName("other/library"), + LocalName: withName("other/library"), + CanonicalName: withName("docker.io/other/library"), Official: false, }, "127.0.0.1:8000/private/moonbase": { @@ -385,9 +418,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: "127.0.0.1:8000", Official: false, }, - RemoteName: "private/moonbase", - LocalName: "127.0.0.1:8000/private/moonbase", - CanonicalName: "127.0.0.1:8000/private/moonbase", + RemoteName: withName("private/moonbase"), + LocalName: withName("127.0.0.1:8000/private/moonbase"), + CanonicalName: withName("127.0.0.1:8000/private/moonbase"), Official: false, }, "127.0.0.1:8000/privatebase": { @@ -395,9 +428,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: "127.0.0.1:8000", Official: false, }, - RemoteName: "privatebase", - LocalName: "127.0.0.1:8000/privatebase", - CanonicalName: "127.0.0.1:8000/privatebase", + RemoteName: withName("privatebase"), + LocalName: withName("127.0.0.1:8000/privatebase"), + CanonicalName: withName("127.0.0.1:8000/privatebase"), Official: false, }, "localhost:8000/private/moonbase": { @@ -405,9 +438,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: "localhost:8000", Official: false, }, - RemoteName: "private/moonbase", - LocalName: "localhost:8000/private/moonbase", - CanonicalName: "localhost:8000/private/moonbase", + RemoteName: withName("private/moonbase"), + LocalName: withName("localhost:8000/private/moonbase"), + CanonicalName: withName("localhost:8000/private/moonbase"), Official: false, }, "localhost:8000/privatebase": { @@ -415,9 +448,9 @@ func 
TestParseRepositoryInfo(t *testing.T) { Name: "localhost:8000", Official: false, }, - RemoteName: "privatebase", - LocalName: "localhost:8000/privatebase", - CanonicalName: "localhost:8000/privatebase", + RemoteName: withName("privatebase"), + LocalName: withName("localhost:8000/privatebase"), + CanonicalName: withName("localhost:8000/privatebase"), Official: false, }, "example.com/private/moonbase": { @@ -425,9 +458,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: "example.com", Official: false, }, - RemoteName: "private/moonbase", - LocalName: "example.com/private/moonbase", - CanonicalName: "example.com/private/moonbase", + RemoteName: withName("private/moonbase"), + LocalName: withName("example.com/private/moonbase"), + CanonicalName: withName("example.com/private/moonbase"), Official: false, }, "example.com/privatebase": { @@ -435,9 +468,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: "example.com", Official: false, }, - RemoteName: "privatebase", - LocalName: "example.com/privatebase", - CanonicalName: "example.com/privatebase", + RemoteName: withName("privatebase"), + LocalName: withName("example.com/privatebase"), + CanonicalName: withName("example.com/privatebase"), Official: false, }, "example.com:8000/private/moonbase": { @@ -445,9 +478,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: "example.com:8000", Official: false, }, - RemoteName: "private/moonbase", - LocalName: "example.com:8000/private/moonbase", - CanonicalName: "example.com:8000/private/moonbase", + RemoteName: withName("private/moonbase"), + LocalName: withName("example.com:8000/private/moonbase"), + CanonicalName: withName("example.com:8000/private/moonbase"), Official: false, }, "example.com:8000/privatebase": { @@ -455,9 +488,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: "example.com:8000", Official: false, }, - RemoteName: "privatebase", - LocalName: "example.com:8000/privatebase", - CanonicalName: "example.com:8000/privatebase", + RemoteName: 
withName("privatebase"), + LocalName: withName("example.com:8000/privatebase"), + CanonicalName: withName("example.com:8000/privatebase"), Official: false, }, "localhost/private/moonbase": { @@ -465,9 +498,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: "localhost", Official: false, }, - RemoteName: "private/moonbase", - LocalName: "localhost/private/moonbase", - CanonicalName: "localhost/private/moonbase", + RemoteName: withName("private/moonbase"), + LocalName: withName("localhost/private/moonbase"), + CanonicalName: withName("localhost/private/moonbase"), Official: false, }, "localhost/privatebase": { @@ -475,9 +508,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: "localhost", Official: false, }, - RemoteName: "privatebase", - LocalName: "localhost/privatebase", - CanonicalName: "localhost/privatebase", + RemoteName: withName("privatebase"), + LocalName: withName("localhost/privatebase"), + CanonicalName: withName("localhost/privatebase"), Official: false, }, IndexName + "/public/moonbase": { @@ -485,9 +518,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: IndexName, Official: true, }, - RemoteName: "public/moonbase", - LocalName: "public/moonbase", - CanonicalName: "docker.io/public/moonbase", + RemoteName: withName("public/moonbase"), + LocalName: withName("public/moonbase"), + CanonicalName: withName("docker.io/public/moonbase"), Official: false, }, "index." 
+ IndexName + "/public/moonbase": { @@ -495,9 +528,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: IndexName, Official: true, }, - RemoteName: "public/moonbase", - LocalName: "public/moonbase", - CanonicalName: "docker.io/public/moonbase", + RemoteName: withName("public/moonbase"), + LocalName: withName("public/moonbase"), + CanonicalName: withName("docker.io/public/moonbase"), Official: false, }, "ubuntu-12.04-base": { @@ -505,9 +538,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: IndexName, Official: true, }, - RemoteName: "library/ubuntu-12.04-base", - LocalName: "ubuntu-12.04-base", - CanonicalName: "docker.io/library/ubuntu-12.04-base", + RemoteName: withName("library/ubuntu-12.04-base"), + LocalName: withName("ubuntu-12.04-base"), + CanonicalName: withName("docker.io/library/ubuntu-12.04-base"), Official: true, }, IndexName + "/ubuntu-12.04-base": { @@ -515,9 +548,9 @@ func TestParseRepositoryInfo(t *testing.T) { Name: IndexName, Official: true, }, - RemoteName: "library/ubuntu-12.04-base", - LocalName: "ubuntu-12.04-base", - CanonicalName: "docker.io/library/ubuntu-12.04-base", + RemoteName: withName("library/ubuntu-12.04-base"), + LocalName: withName("ubuntu-12.04-base"), + CanonicalName: withName("docker.io/library/ubuntu-12.04-base"), Official: true, }, "index." 
+ IndexName + "/ubuntu-12.04-base": { @@ -525,22 +558,27 @@ func TestParseRepositoryInfo(t *testing.T) { Name: IndexName, Official: true, }, - RemoteName: "library/ubuntu-12.04-base", - LocalName: "ubuntu-12.04-base", - CanonicalName: "docker.io/library/ubuntu-12.04-base", + RemoteName: withName("library/ubuntu-12.04-base"), + LocalName: withName("ubuntu-12.04-base"), + CanonicalName: withName("docker.io/library/ubuntu-12.04-base"), Official: true, }, } for reposName, expectedRepoInfo := range expectedRepoInfos { - repoInfo, err := ParseRepositoryInfo(reposName) + named, err := reference.WithName(reposName) + if err != nil { + t.Error(err) + } + + repoInfo, err := ParseRepositoryInfo(named) if err != nil { t.Error(err) } else { checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName) - checkEqual(t, repoInfo.RemoteName, expectedRepoInfo.RemoteName, reposName) - checkEqual(t, repoInfo.LocalName, expectedRepoInfo.LocalName, reposName) - checkEqual(t, repoInfo.CanonicalName, expectedRepoInfo.CanonicalName, reposName) + checkEqual(t, repoInfo.RemoteName.String(), expectedRepoInfo.RemoteName.String(), reposName) + checkEqual(t, repoInfo.LocalName.String(), expectedRepoInfo.LocalName.String(), reposName) + checkEqual(t, repoInfo.CanonicalName.String(), expectedRepoInfo.CanonicalName.String(), reposName) checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName) checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName) } @@ -687,8 +725,11 @@ func TestMirrorEndpointLookup(t *testing.T) { return false } s := Service{Config: makeServiceConfig([]string{"my.mirror"}, nil)} - imageName := IndexName + "/test/image" + imageName, err := reference.WithName(IndexName + "/test/image") + if err != nil { + t.Error(err) + } pushAPIEndpoints, err := s.LookupPushEndpoints(imageName) if err != nil { t.Fatal(err) @@ -708,7 +749,11 @@ func TestMirrorEndpointLookup(t *testing.T) { func TestPushRegistryTag(t *testing.T) { r := 
spawnTestRegistrySession(t) - err := r.PushRegistryTag("foo42/bar", imageID, "stable", makeURL("/v1/")) + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + err = r.PushRegistryTag(repoRef, imageID, "stable", makeURL("/v1/")) if err != nil { t.Fatal(err) } @@ -726,14 +771,18 @@ func TestPushImageJSONIndex(t *testing.T) { Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", }, } - repoData, err := r.PushImageJSONIndex("foo42/bar", imgData, false, nil) + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + repoData, err := r.PushImageJSONIndex(repoRef, imgData, false, nil) if err != nil { t.Fatal(err) } if repoData == nil { t.Fatal("Expected RepositoryData object") } - repoData, err = r.PushImageJSONIndex("foo42/bar", imgData, true, []string{r.indexEndpoint.String()}) + repoData, err = r.PushImageJSONIndex(repoRef, imgData, true, []string{r.indexEndpoint.String()}) if err != nil { t.Fatal(err) } @@ -781,7 +830,11 @@ func TestValidRemoteName(t *testing.T) { "dock__er/docker", } for _, repositoryName := range validRepositoryNames { - if err := validateRemoteName(repositoryName); err != nil { + repositoryRef, err := reference.WithName(repositoryName) + if err != nil { + t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) + } + if err := validateRemoteName(repositoryRef); err != nil { t.Errorf("Repository name should be valid: %v. 
Error: %v", repositoryName, err) } } @@ -818,7 +871,11 @@ func TestValidRemoteName(t *testing.T) { "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", } for _, repositoryName := range invalidRepositoryNames { - if err := validateRemoteName(repositoryName); err == nil { + repositoryRef, err := reference.ParseNamed(repositoryName) + if err != nil { + continue + } + if err := validateRemoteName(repositoryRef); err == nil { t.Errorf("Repository name should be invalid: %v", repositoryName) } } diff --git a/registry/service.go b/registry/service.go index 6ac930d6e3..1ef9682785 100644 --- a/registry/service.go +++ b/registry/service.go @@ -4,7 +4,9 @@ import ( "crypto/tls" "net/http" "net/url" + "strings" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client/auth" "github.com/docker/docker/cliconfig" ) @@ -51,17 +53,39 @@ func (s *Service) Auth(authConfig *cliconfig.AuthConfig) (string, error) { return Login(authConfig, endpoint) } +// splitReposSearchTerm breaks a search term into an index name and remote name +func splitReposSearchTerm(reposName string) (string, string) { + nameParts := strings.SplitN(reposName, "/", 2) + var indexName, remoteName string + if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && + !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { + // This is a Docker Index repos (ex: samalba/hipache or ubuntu) + // 'docker.io' + indexName = IndexName + remoteName = reposName + } else { + indexName = nameParts[0] + remoteName = nameParts[1] + } + return indexName, remoteName +} + // Search queries the public registry for images matching the specified // search terms, and returns the results. 
func (s *Service) Search(term string, authConfig *cliconfig.AuthConfig, headers map[string][]string) (*SearchResults, error) { + if err := validateNoSchema(term); err != nil { + return nil, err + } - repoInfo, err := s.ResolveRepositoryBySearch(term) + indexName, remoteName := splitReposSearchTerm(term) + + index, err := s.Config.NewIndexInfo(indexName) if err != nil { return nil, err } // *TODO: Search multiple indexes. - endpoint, err := NewEndpoint(repoInfo.Index, http.Header(headers), APIVersionUnknown) + endpoint, err := NewEndpoint(index, http.Header(headers), APIVersionUnknown) if err != nil { return nil, err } @@ -70,19 +94,23 @@ func (s *Service) Search(term string, authConfig *cliconfig.AuthConfig, headers if err != nil { return nil, err } - return r.SearchRepositories(repoInfo.GetSearchTerm()) + + if index.Official { + localName := remoteName + if strings.HasPrefix(localName, "library/") { + // If pull "library/foo", it's stored locally under "foo" + localName = strings.SplitN(localName, "/", 2)[1] + } + + return r.SearchRepositories(localName) + } + return r.SearchRepositories(remoteName) } // ResolveRepository splits a repository name into its components // and configuration of the associated registry. -func (s *Service) ResolveRepository(name string) (*RepositoryInfo, error) { - return s.Config.NewRepositoryInfo(name, false) -} - -// ResolveRepositoryBySearch splits a repository name into its components -// and configuration of the associated registry. 
-func (s *Service) ResolveRepositoryBySearch(name string) (*RepositoryInfo, error) { - return s.Config.NewRepositoryInfo(name, true) +func (s *Service) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { + return s.Config.NewRepositoryInfo(name) } // ResolveIndex takes indexName and returns index info @@ -123,14 +151,14 @@ func (s *Service) tlsConfigForMirror(mirror string) (*tls.Config, error) { // LookupPullEndpoints creates an list of endpoints to try to pull from, in order of preference. // It gives preference to v2 endpoints over v1, mirrors over the actual // registry, and HTTPS over plain HTTP. -func (s *Service) LookupPullEndpoints(repoName string) (endpoints []APIEndpoint, err error) { +func (s *Service) LookupPullEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { return s.lookupEndpoints(repoName) } // LookupPushEndpoints creates an list of endpoints to try to push to, in order of preference. // It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. // Mirrors are not included. 
-func (s *Service) LookupPushEndpoints(repoName string) (endpoints []APIEndpoint, err error) { +func (s *Service) LookupPushEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { allEndpoints, err := s.lookupEndpoints(repoName) if err == nil { for _, endpoint := range allEndpoints { @@ -142,7 +170,7 @@ func (s *Service) LookupPushEndpoints(repoName string) (endpoints []APIEndpoint, return endpoints, err } -func (s *Service) lookupEndpoints(repoName string) (endpoints []APIEndpoint, err error) { +func (s *Service) lookupEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { endpoints, err = s.lookupV2Endpoints(repoName) if err != nil { return nil, err diff --git a/registry/service_v1.go b/registry/service_v1.go index ddb78ee60a..5fdc1ececf 100644 --- a/registry/service_v1.go +++ b/registry/service_v1.go @@ -4,13 +4,15 @@ import ( "fmt" "strings" + "github.com/docker/distribution/reference" "github.com/docker/docker/pkg/tlsconfig" ) -func (s *Service) lookupV1Endpoints(repoName string) (endpoints []APIEndpoint, err error) { +func (s *Service) lookupV1Endpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { var cfg = tlsconfig.ServerDefault tlsConfig := &cfg - if strings.HasPrefix(repoName, DefaultNamespace+"/") { + nameString := repoName.Name() + if strings.HasPrefix(nameString, DefaultNamespace+"/") { endpoints = append(endpoints, APIEndpoint{ URL: DefaultV1Registry, Version: APIVersion1, @@ -21,11 +23,11 @@ func (s *Service) lookupV1Endpoints(repoName string) (endpoints []APIEndpoint, e return endpoints, nil } - slashIndex := strings.IndexRune(repoName, '/') + slashIndex := strings.IndexRune(nameString, '/') if slashIndex <= 0 { - return nil, fmt.Errorf("invalid repo name: missing '/': %s", repoName) + return nil, fmt.Errorf("invalid repo name: missing '/': %s", nameString) } - hostname := repoName[:slashIndex] + hostname := nameString[:slashIndex] tlsConfig, err = s.TLSConfig(hostname) if err != nil { diff 
--git a/registry/service_v2.go b/registry/service_v2.go index 70d5fd710e..56a3d2eeed 100644 --- a/registry/service_v2.go +++ b/registry/service_v2.go @@ -4,14 +4,16 @@ import ( "fmt" "strings" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client/auth" "github.com/docker/docker/pkg/tlsconfig" ) -func (s *Service) lookupV2Endpoints(repoName string) (endpoints []APIEndpoint, err error) { +func (s *Service) lookupV2Endpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { var cfg = tlsconfig.ServerDefault tlsConfig := &cfg - if strings.HasPrefix(repoName, DefaultNamespace+"/") { + nameString := repoName.Name() + if strings.HasPrefix(nameString, DefaultNamespace+"/") { // v2 mirrors for _, mirror := range s.Config.Mirrors { mirrorTLSConfig, err := s.tlsConfigForMirror(mirror) @@ -39,11 +41,11 @@ func (s *Service) lookupV2Endpoints(repoName string) (endpoints []APIEndpoint, e return endpoints, nil } - slashIndex := strings.IndexRune(repoName, '/') + slashIndex := strings.IndexRune(nameString, '/') if slashIndex <= 0 { - return nil, fmt.Errorf("invalid repo name: missing '/': %s", repoName) + return nil, fmt.Errorf("invalid repo name: missing '/': %s", nameString) } - hostname := repoName[:slashIndex] + hostname := nameString[:slashIndex] tlsConfig, err = s.TLSConfig(hostname) if err != nil { diff --git a/registry/session.go b/registry/session.go index 2a20d32190..645e5d44b3 100644 --- a/registry/session.go +++ b/registry/session.go @@ -20,6 +20,7 @@ import ( "time" "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" "github.com/docker/docker/cliconfig" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/ioutils" @@ -320,7 +321,9 @@ func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io // repository. It queries each of the registries supplied in the registries // argument, and returns data from the first one that answers the query // successfully. 
-func (r *Session) GetRemoteTag(registries []string, repository string, askedTag string) (string, error) { +func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { + repository := repositoryRef.Name() + if strings.Count(repository, "/") == 0 { // This will be removed once the registry supports auto-resolution on // the "library" namespace @@ -356,7 +359,9 @@ func (r *Session) GetRemoteTag(registries []string, repository string, askedTag // of the registries supplied in the registries argument, and returns data from // the first one that answers the query successfully. It returns a map with // tag names as the keys and image IDs as the values. -func (r *Session) GetRemoteTags(registries []string, repository string) (map[string]string, error) { +func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { + repository := repositoryRef.Name() + if strings.Count(repository, "/") == 0 { // This will be removed once the registry supports auto-resolution on // the "library" namespace @@ -408,8 +413,8 @@ func buildEndpointsList(headers []string, indexEp string) ([]string, error) { } // GetRepositoryData returns lists of images and endpoints for the repository -func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) { - repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), remote) +func (r *Session) GetRepositoryData(remote reference.Named) (*RepositoryData, error) { + repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), remote.Name()) logrus.Debugf("[registry] Calling GET %s", repositoryTarget) @@ -443,7 +448,7 @@ func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) { if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository 
%s: %q", res.StatusCode, remote, errBody), res) + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, remote.Name(), errBody), res) } var endpoints []string @@ -595,10 +600,10 @@ func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry // PushRegistryTag pushes a tag on the registry. // Remote has the format '/ -func (r *Session) PushRegistryTag(remote, revision, tag, registry string) error { +func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error { // "jsonify" the string revision = "\"" + revision + "\"" - path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag) + path := fmt.Sprintf("repositories/%s/tags/%s", remote.Name(), tag) req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision)) if err != nil { @@ -612,13 +617,13 @@ func (r *Session) PushRegistryTag(remote, revision, tag, registry string) error } res.Body.Close() if res.StatusCode != 200 && res.StatusCode != 201 { - return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res) + return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote.Name()), res) } return nil } // PushImageJSONIndex uploads an image list to the repository -func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { +func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { cleanImgList := []*ImgData{} if validate { for _, elem := range imgList { @@ -638,7 +643,7 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate if validate { suffix = "images" } - u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote, suffix) + u := 
fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote.Name(), suffix) logrus.Debugf("[registry] PUT %s", u) logrus.Debugf("Image list pushed to index:\n%s", imgListJSON) headers := map[string][]string{ @@ -676,7 +681,7 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote, errBody), res) + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote.Name(), errBody), res) } tokens = res.Header["X-Docker-Token"] logrus.Debugf("Auth token: %v", tokens) @@ -694,7 +699,7 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote, errBody), res) + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote.Name(), errBody), res) } } diff --git a/registry/types.go b/registry/types.go index 09b9d57134..8a201a9172 100644 --- a/registry/types.go +++ b/registry/types.go @@ -1,5 +1,9 @@ package registry +import ( + "github.com/docker/distribution/reference" +) + // SearchResult describes a search result returned from a registry type SearchResult struct { // StarCount indicates the number of stars this repository has @@ -126,13 +130,13 @@ type RepositoryInfo struct { Index *IndexInfo // RemoteName is the remote name of the repository, such as // "library/ubuntu-12.04-base" - RemoteName string + RemoteName reference.Named // LocalName is the local name of the repository, such as // "ubuntu-12.04-base" - LocalName string + LocalName reference.Named // CanonicalName is 
the canonical name of the repository, such as // "docker.io/library/ubuntu-12.04-base" - CanonicalName string + CanonicalName reference.Named // Official indicates whether the repository is considered official. // If the registry is official, and the normalized name does not // contain a '/' (e.g. "foo"), then it is considered an official repo. diff --git a/utils/utils.go b/utils/utils.go index d2e83f61a7..91246eafce 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -269,23 +269,6 @@ func ReadDockerIgnore(reader io.ReadCloser) ([]string, error) { return excludes, nil } -// ImageReference combines `repo` and `ref` and returns a string representing -// the combination. If `ref` is a digest (meaning it's of the form -// :, the returned string is @. Otherwise, -// ref is assumed to be a tag, and the returned string is :. -func ImageReference(repo, ref string) string { - if DigestReference(ref) { - return repo + "@" + ref - } - return repo + ":" + ref -} - -// DigestReference returns true if ref is a digest reference; i.e. if it -// is of the form :. -func DigestReference(ref string) bool { - return strings.Contains(ref, ":") -} - // GetErrorMessage returns the human readable message associated with // the passed-in error. 
In some cases the default Error() func returns // something that is less than useful so based on its types this func diff --git a/utils/utils_test.go b/utils/utils_test.go index 9acb8017a6..3dc5af88af 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -26,36 +26,6 @@ func TestReplaceAndAppendEnvVars(t *testing.T) { } } -func TestImageReference(t *testing.T) { - tests := []struct { - repo string - ref string - expected string - }{ - {"repo", "tag", "repo:tag"}, - {"repo", "sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64", "repo@sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64"}, - } - - for i, test := range tests { - actual := ImageReference(test.repo, test.ref) - if test.expected != actual { - t.Errorf("%d: expected %q, got %q", i, test.expected, actual) - } - } -} - -func TestDigestReference(t *testing.T) { - input := "sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64" - if !DigestReference(input) { - t.Errorf("Expected DigestReference=true for input %q", input) - } - - input = "latest" - if DigestReference(input) { - t.Errorf("Unexpected DigestReference=true for input %q", input) - } -} - func TestReadDockerIgnore(t *testing.T) { tmpDir, err := ioutil.TempDir("", "dockerignore-test") if err != nil { From ed4d236e04e1a5724de578654939e3a42f7466cc Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Wed, 18 Nov 2015 14:21:23 -0800 Subject: [PATCH 7/7] Remove graph package The graph package is no longer used for tag, image, or layer storage. 
Signed-off-by: Tonis Tiigi --- graph/export.go | 180 ---- graph/fixtures/validate_manifest/bad_manifest | 38 - .../validate_manifest/extra_data_manifest | 46 - .../fixtures/validate_manifest/good_manifest | 38 - .../validate_manifest/no_signature_manifest | 22 - graph/graph.go | 813 ------------------ graph/graph_test.go | 308 ------- graph/graph_unix.go | 8 - graph/graph_windows.go | 8 - graph/history.go | 119 --- graph/import.go | 73 -- graph/list.go | 185 ---- graph/load.go | 134 --- graph/load_unsupported.go | 14 - graph/pools_test.go | 44 - graph/pull.go | 157 ---- graph/pull_v1.go | 350 -------- graph/pull_v2.go | 728 ---------------- graph/pull_v2_test.go | 195 ----- graph/push.go | 126 --- graph/push_v1.go | 354 -------- graph/push_v2.go | 397 --------- graph/registry.go | 116 --- graph/service.go | 89 -- graph/tags.go | 431 ---------- graph/tags/tags.go | 36 - graph/tags/tags_unit_test.go | 23 - graph/tags_unit_test.go | 205 ----- 28 files changed, 5237 deletions(-) delete mode 100644 graph/export.go delete mode 100644 graph/fixtures/validate_manifest/bad_manifest delete mode 100644 graph/fixtures/validate_manifest/extra_data_manifest delete mode 100644 graph/fixtures/validate_manifest/good_manifest delete mode 100644 graph/fixtures/validate_manifest/no_signature_manifest delete mode 100644 graph/graph.go delete mode 100644 graph/graph_test.go delete mode 100644 graph/graph_unix.go delete mode 100644 graph/graph_windows.go delete mode 100644 graph/history.go delete mode 100644 graph/import.go delete mode 100644 graph/list.go delete mode 100644 graph/load.go delete mode 100644 graph/load_unsupported.go delete mode 100644 graph/pools_test.go delete mode 100644 graph/pull.go delete mode 100644 graph/pull_v1.go delete mode 100644 graph/pull_v2.go delete mode 100644 graph/pull_v2_test.go delete mode 100644 graph/push.go delete mode 100644 graph/push_v1.go delete mode 100644 graph/push_v2.go delete mode 100644 graph/registry.go delete mode 100644 
graph/service.go delete mode 100644 graph/tags.go delete mode 100644 graph/tags/tags.go delete mode 100644 graph/tags/tags_unit_test.go delete mode 100644 graph/tags_unit_test.go diff --git a/graph/export.go b/graph/export.go deleted file mode 100644 index 3a47f318e4..0000000000 --- a/graph/export.go +++ /dev/null @@ -1,180 +0,0 @@ -package graph - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/registry" -) - -// ImageExport exports list of images to a output stream specified in the -// config. The exported images are archived into a tar when written to the -// output stream. All images with the given tag and all versions containing the -// same tag are exported. names is the set of tags to export, and outStream -// is the writer which the images are written to. 
-func (s *TagStore) ImageExport(names []string, outStream io.Writer) error { - // get image json - tempdir, err := ioutil.TempDir("", "docker-export-") - if err != nil { - return err - } - defer os.RemoveAll(tempdir) - - rootRepoMap := map[string]repository{} - addKey := func(name string, tag string, id string) { - logrus.Debugf("add key [%s:%s]", name, tag) - if repo, ok := rootRepoMap[name]; !ok { - rootRepoMap[name] = repository{tag: id} - } else { - repo[tag] = id - } - } - for _, name := range names { - name = registry.NormalizeLocalName(name) - logrus.Debugf("Serializing %s", name) - rootRepo := s.Repositories[name] - if rootRepo != nil { - // this is a base repo name, like 'busybox' - for tag, id := range rootRepo { - addKey(name, tag, id) - if err := s.exportImage(id, tempdir); err != nil { - return err - } - } - } else { - img, err := s.LookupImage(name) - if err != nil { - return err - } - - if img != nil { - // This is a named image like 'busybox:latest' - repoName, repoTag := parsers.ParseRepositoryTag(name) - - // Skip digests on save - if _, err := digest.ParseDigest(repoTag); err == nil { - repoTag = "" - } - - // check this length, because a lookup of a truncated has will not have a tag - // and will not need to be added to this map - if len(repoTag) > 0 { - addKey(repoName, repoTag, img.ID) - } - if err := s.exportImage(img.ID, tempdir); err != nil { - return err - } - - } else { - // this must be an ID that didn't get looked up just right? 
- if err := s.exportImage(name, tempdir); err != nil { - return err - } - } - } - logrus.Debugf("End Serializing %s", name) - } - // write repositories, if there is something to write - if len(rootRepoMap) > 0 { - f, err := os.OpenFile(filepath.Join(tempdir, "repositories"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - f.Close() - return err - } - if err := json.NewEncoder(f).Encode(rootRepoMap); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - if err := os.Chtimes(filepath.Join(tempdir, "repositories"), time.Unix(0, 0), time.Unix(0, 0)); err != nil { - return err - } - } else { - logrus.Debugf("There were no repositories to write") - } - - fs, err := archive.Tar(tempdir, archive.Uncompressed) - if err != nil { - return err - } - defer fs.Close() - - if _, err := io.Copy(outStream, fs); err != nil { - return err - } - logrus.Debugf("End export image") - return nil -} - -func (s *TagStore) exportImage(name, tempdir string) error { - for n := name; n != ""; { - img, err := s.LookupImage(n) - if err != nil || img == nil { - return fmt.Errorf("No such image %s", n) - } - - // temporary directory - tmpImageDir := filepath.Join(tempdir, n) - if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil { - if os.IsExist(err) { - return nil - } - return err - } - - var version = "1.0" - var versionBuf = []byte(version) - - if err := ioutil.WriteFile(filepath.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil { - return err - } - - imageInspectRaw, err := json.Marshal(img) - if err != nil { - return err - } - - // serialize json - json, err := os.Create(filepath.Join(tmpImageDir, "json")) - if err != nil { - return err - } - - written, err := json.Write(imageInspectRaw) - if err != nil { - return err - } - if written != len(imageInspectRaw) { - logrus.Warnf("%d byes should have been written instead %d have been written", written, len(imageInspectRaw)) - } - - // serialize filesystem - fsTar, err := 
os.Create(filepath.Join(tmpImageDir, "layer.tar")) - if err != nil { - return err - } - if err := s.imageTarLayer(n, fsTar); err != nil { - return err - } - - for _, fname := range []string{"", "VERSION", "json", "layer.tar"} { - if err := os.Chtimes(filepath.Join(tmpImageDir, fname), img.Created, img.Created); err != nil { - return err - } - } - - // try again with parent - n = img.Parent - } - return nil -} diff --git a/graph/fixtures/validate_manifest/bad_manifest b/graph/fixtures/validate_manifest/bad_manifest deleted file mode 100644 index a1f02a62a3..0000000000 --- a/graph/fixtures/validate_manifest/bad_manifest +++ /dev/null @@ -1,38 +0,0 @@ -{ - "schemaVersion": 2, - "name": "library/hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in 
/\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" - } - ], - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", - "kty": "EC", - "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", - "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" - }, - "alg": "ES256" - }, - "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", - "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" - } - ] -} diff --git a/graph/fixtures/validate_manifest/extra_data_manifest b/graph/fixtures/validate_manifest/extra_data_manifest deleted file mode 100644 index beec19a801..0000000000 --- a/graph/fixtures/validate_manifest/extra_data_manifest +++ /dev/null @@ -1,46 +0,0 @@ -{ - "schemaVersion": 1, - "name": "library/hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "history": [ - { - "v1Compatibility": 
"{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY 
file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" - } - ], - "fsLayers": [ - { - "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", - "kty": "EC", - "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", - "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" - }, - "alg": "ES256" - }, - "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", - "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" - } - ] -} diff --git a/graph/fixtures/validate_manifest/good_manifest b/graph/fixtures/validate_manifest/good_manifest deleted file mode 100644 index b107de3226..0000000000 --- a/graph/fixtures/validate_manifest/good_manifest +++ /dev/null @@ -1,38 +0,0 @@ -{ - "schemaVersion": 1, - "name": "library/hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": 
"sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": 
"{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" - } - ], - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", - "kty": "EC", - "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", - "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" - }, - "alg": "ES256" - }, - "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", - "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" - } - ] -} \ No newline at end of file diff --git a/graph/fixtures/validate_manifest/no_signature_manifest b/graph/fixtures/validate_manifest/no_signature_manifest deleted file mode 100644 index 
7a79540af4..0000000000 --- a/graph/fixtures/validate_manifest/no_signature_manifest +++ /dev/null @@ -1,22 +0,0 @@ -{ - "schemaVersion": 1, - "name": "library/hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": 
"{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" - } - ] -} diff --git a/graph/graph.go b/graph/graph.go deleted file mode 100644 index 9d2f0a0238..0000000000 --- a/graph/graph.go +++ /dev/null @@ -1,813 +0,0 @@ -package graph - -import ( - "compress/gzip" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/locker" - 
"github.com/docker/docker/pkg/progressreader" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/truncindex" - "github.com/docker/docker/runconfig" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -// v1Descriptor is a non-content-addressable image descriptor -type v1Descriptor struct { - img *image.Image -} - -// ID returns the image ID specified in the image structure. -func (img v1Descriptor) ID() string { - return img.img.ID -} - -// Parent returns the parent ID specified in the image structure. -func (img v1Descriptor) Parent() string { - return img.img.Parent -} - -// MarshalConfig renders the image structure into JSON. -func (img v1Descriptor) MarshalConfig() ([]byte, error) { - return json.Marshal(img.img) -} - -// The type is used to protect pulling or building related image -// layers from deleteing when filtered by dangling=true -// The key of layers is the images ID which is pulling or building -// The value of layers is a slice which hold layer IDs referenced to -// pulling or building images -type retainedLayers struct { - layerHolders map[string]map[string]struct{} // map[layerID]map[sessionID] - sync.Mutex -} - -func (r *retainedLayers) Add(sessionID string, layerIDs []string) { - r.Lock() - defer r.Unlock() - for _, layerID := range layerIDs { - if r.layerHolders[layerID] == nil { - r.layerHolders[layerID] = map[string]struct{}{} - } - r.layerHolders[layerID][sessionID] = struct{}{} - } -} - -func (r *retainedLayers) Delete(sessionID string, layerIDs []string) { - r.Lock() - defer r.Unlock() - for _, layerID := range layerIDs { - holders, ok := r.layerHolders[layerID] - if !ok { - continue - } - delete(holders, sessionID) - if len(holders) == 0 { - delete(r.layerHolders, layerID) // Delete any empty reference set. 
- } - } -} - -func (r *retainedLayers) Exists(layerID string) bool { - r.Lock() - _, exists := r.layerHolders[layerID] - r.Unlock() - return exists -} - -// A Graph is a store for versioned filesystem images and the relationship between them. -type Graph struct { - root string - idIndex *truncindex.TruncIndex - driver graphdriver.Driver - imagesMutex sync.Mutex - imageMutex locker.Locker // protect images in driver. - retained *retainedLayers - tarSplitDisabled bool - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - - // access to parentRefs must be protected with imageMutex locking the image id - // on the key of the map (e.g. imageMutex.Lock(img.ID), parentRefs[img.ID]...) - parentRefs map[string]int -} - -// file names for ./graph// -const ( - jsonFileName = "json" - layersizeFileName = "layersize" - digestFileName = "checksum" - tarDataFileName = "tar-data.json.gz" - v1CompatibilityFileName = "v1Compatibility" - parentFileName = "parent" -) - -var ( - // errDigestNotSet is used when request the digest for a layer - // but the layer has no digest value or content to compute the - // the digest. - errDigestNotSet = errors.New("digest is not set for layer") -) - -// NewGraph instantiates a new graph at the given root path in the filesystem. -// `root` will be created if it doesn't exist. 
-func NewGraph(root string, driver graphdriver.Driver, uidMaps, gidMaps []idtools.IDMap) (*Graph, error) { - abspath, err := filepath.Abs(root) - if err != nil { - return nil, err - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - // Create the root directory if it doesn't exists - if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { - return nil, err - } - - graph := &Graph{ - root: abspath, - idIndex: truncindex.NewTruncIndex([]string{}), - driver: driver, - retained: &retainedLayers{layerHolders: make(map[string]map[string]struct{})}, - uidMaps: uidMaps, - gidMaps: gidMaps, - parentRefs: make(map[string]int), - } - - // Windows does not currently support tarsplit functionality. - if runtime.GOOS == "windows" { - graph.tarSplitDisabled = true - } - - if err := graph.restore(); err != nil { - return nil, err - } - return graph, nil -} - -// IsHeld returns whether the given layerID is being used by an ongoing pull or build. -func (graph *Graph) IsHeld(layerID string) bool { - return graph.retained.Exists(layerID) -} - -func (graph *Graph) restore() error { - dir, err := ioutil.ReadDir(graph.root) - if err != nil { - return err - } - var ids = []string{} - for _, v := range dir { - id := v.Name() - if graph.driver.Exists(id) { - img, err := graph.loadImage(id) - if err != nil { - logrus.Warnf("ignoring image %s, it could not be restored: %v", id, err) - continue - } - graph.imageMutex.Lock(img.Parent) - graph.parentRefs[img.Parent]++ - graph.imageMutex.Unlock(img.Parent) - ids = append(ids, id) - } - } - - graph.idIndex = truncindex.NewTruncIndex(ids) - logrus.Debugf("Restored %d elements", len(ids)) - return nil -} - -// IsNotExist detects whether an image exists by parsing the incoming error -// message. 
-func (graph *Graph) IsNotExist(err error, id string) bool { - // FIXME: Implement error subclass instead of looking at the error text - // Note: This is the way golang implements os.IsNotExists on Plan9 - return err != nil && (strings.Contains(strings.ToLower(err.Error()), "does not exist") || strings.Contains(strings.ToLower(err.Error()), "no such")) && strings.Contains(err.Error(), id) -} - -// Exists returns true if an image is registered at the given id. -// If the image doesn't exist or if an error is encountered, false is returned. -func (graph *Graph) Exists(id string) bool { - if _, err := graph.Get(id); err != nil { - return false - } - return true -} - -// Get returns the image with the given id, or an error if the image doesn't exist. -func (graph *Graph) Get(name string) (*image.Image, error) { - id, err := graph.idIndex.Get(name) - if err != nil { - if err == truncindex.ErrNotExist { - return nil, fmt.Errorf("image %s does not exist", name) - } - return nil, err - } - img, err := graph.loadImage(id) - if err != nil { - return nil, err - } - if img.ID != id { - return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID) - } - - if img.Size < 0 { - size, err := graph.driver.DiffSize(img.ID, img.Parent) - if err != nil { - return nil, fmt.Errorf("unable to calculate size of image id %q: %s", img.ID, err) - } - - img.Size = size - if err := graph.saveSize(graph.imageRoot(id), img.Size); err != nil { - return nil, err - } - } - return img, nil -} - -// Create creates a new image and registers it in the graph. 
-func (graph *Graph) Create(layerData io.Reader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) { - img := &image.Image{ - ID: stringid.GenerateRandomID(), - Comment: comment, - Created: time.Now().UTC(), - DockerVersion: dockerversion.Version, - Author: author, - Config: config, - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - } - - if containerID != "" { - img.Parent = containerImage - img.Container = containerID - img.ContainerConfig = *containerConfig - } - - if err := graph.Register(v1Descriptor{img}, layerData); err != nil { - return nil, err - } - return img, nil -} - -// Register imports a pre-existing image into the graph. -// Returns nil if the image is already registered. -func (graph *Graph) Register(im image.Descriptor, layerData io.Reader) (err error) { - imgID := im.ID() - - if err := image.ValidateID(imgID); err != nil { - return err - } - - // this is needed cause pull_v2 attemptIDReuse could deadlock - graph.imagesMutex.Lock() - defer graph.imagesMutex.Unlock() - - // We need this entire operation to be atomic within the engine. Note that - // this doesn't mean Register is fully safe yet. - graph.imageMutex.Lock(imgID) - defer graph.imageMutex.Unlock(imgID) - - return graph.register(im, layerData) -} - -func (graph *Graph) register(im image.Descriptor, layerData io.Reader) (err error) { - imgID := im.ID() - - // Skip register if image is already registered - if graph.Exists(imgID) { - return nil - } - - // The returned `error` must be named in this function's signature so that - // `err` is not shadowed in this deferred cleanup. - defer func() { - // If any error occurs, remove the new dir from the driver. - // Don't check for errors since the dir might not have been created. - if err != nil { - graph.driver.Remove(imgID) - } - }() - - // Ensure that the image root does not exist on the filesystem - // when it is not registered in the graph. 
- // This is common when you switch from one graph driver to another - if err := os.RemoveAll(graph.imageRoot(imgID)); err != nil && !os.IsNotExist(err) { - return err - } - - // If the driver has this ID but the graph doesn't, remove it from the driver to start fresh. - // (the graph is the source of truth). - // Ignore errors, since we don't know if the driver correctly returns ErrNotExist. - // (FIXME: make that mandatory for drivers). - graph.driver.Remove(imgID) - - tmp, err := graph.mktemp() - if err != nil { - return err - } - defer os.RemoveAll(tmp) - - parent := im.Parent() - - // Create root filesystem in the driver - if err := createRootFilesystemInDriver(graph, imgID, parent); err != nil { - return err - } - - // Apply the diff/layer - config, err := im.MarshalConfig() - if err != nil { - return err - } - if err := graph.storeImage(imgID, parent, config, layerData, tmp); err != nil { - return err - } - // Commit - if err := os.Rename(tmp, graph.imageRoot(imgID)); err != nil { - return err - } - - graph.idIndex.Add(imgID) - - graph.imageMutex.Lock(parent) - graph.parentRefs[parent]++ - graph.imageMutex.Unlock(parent) - - return nil -} - -func createRootFilesystemInDriver(graph *Graph, id, parent string) error { - if err := graph.driver.Create(id, parent, ""); err != nil { - return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, id, err) - } - return nil -} - -// TempLayerArchive creates a temporary archive of the given image's filesystem layer. -// The archive is stored on disk and will be automatically deleted as soon as has been read. -// If output is not nil, a human-readable progress bar will be written to it. 
-func (graph *Graph) tempLayerArchive(id string, sf *streamformatter.StreamFormatter, output io.Writer) (*archive.TempArchive, error) { - image, err := graph.Get(id) - if err != nil { - return nil, err - } - tmp, err := graph.mktemp() - if err != nil { - return nil, err - } - defer os.RemoveAll(tmp) - a, err := graph.tarLayer(image) - if err != nil { - return nil, err - } - progressReader := progressreader.New(progressreader.Config{ - In: a, - Out: output, - Formatter: sf, - Size: 0, - NewLines: false, - ID: stringid.TruncateID(id), - Action: "Buffering to disk", - }) - defer progressReader.Close() - return archive.NewTempArchive(progressReader, tmp) -} - -// mktemp creates a temporary sub-directory inside the graph's filesystem. -func (graph *Graph) mktemp() (string, error) { - dir := filepath.Join(graph.root, "_tmp", stringid.GenerateNonCryptoID()) - rootUID, rootGID, err := idtools.GetRootUIDGID(graph.uidMaps, graph.gidMaps) - if err != nil { - return "", err - } - if err := idtools.MkdirAllAs(dir, 0700, rootUID, rootGID); err != nil { - return "", err - } - return dir, nil -} - -// Delete atomically removes an image from the graph. 
-func (graph *Graph) Delete(name string) error { - id, err := graph.idIndex.Get(name) - if err != nil { - return err - } - img, err := graph.Get(id) - if err != nil { - return err - } - graph.idIndex.Delete(id) - tmp, err := graph.mktemp() - if err != nil { - tmp = graph.imageRoot(id) - } else { - if err := os.Rename(graph.imageRoot(id), tmp); err != nil { - // On err make tmp point to old dir and cleanup unused tmp dir - os.RemoveAll(tmp) - tmp = graph.imageRoot(id) - } - } - // Remove rootfs data from the driver - graph.driver.Remove(id) - - graph.imageMutex.Lock(img.Parent) - graph.parentRefs[img.Parent]-- - if graph.parentRefs[img.Parent] == 0 { - delete(graph.parentRefs, img.Parent) - } - graph.imageMutex.Unlock(img.Parent) - - // Remove the trashed image directory - return os.RemoveAll(tmp) -} - -// Map returns a list of all images in the graph, addressable by ID. -func (graph *Graph) Map() map[string]*image.Image { - images := make(map[string]*image.Image) - graph.walkAll(func(image *image.Image) { - images[image.ID] = image - }) - return images -} - -// walkAll iterates over each image in the graph, and passes it to a handler. -// The walking order is undetermined. -func (graph *Graph) walkAll(handler func(*image.Image)) { - graph.idIndex.Iterate(func(id string) { - img, err := graph.Get(id) - if err != nil { - return - } - if handler != nil { - handler(img) - } - }) -} - -// ByParent returns a lookup table of images by their parent. -// If an image of key ID has 3 children images, then the value for key ID -// will be a list of 3 images. -// If an image has no children, it will not have an entry in the table. 
-func (graph *Graph) ByParent() map[string][]*image.Image { - byParent := make(map[string][]*image.Image) - graph.walkAll(func(img *image.Image) { - parent, err := graph.Get(img.Parent) - if err != nil { - return - } - byParent[parent.ID] = append(byParent[parent.ID], img) - }) - return byParent -} - -// HasChildren returns whether the given image has any child images. -func (graph *Graph) HasChildren(imgID string) bool { - graph.imageMutex.Lock(imgID) - count := graph.parentRefs[imgID] - graph.imageMutex.Unlock(imgID) - return count > 0 -} - -// Retain keeps the images and layers that are in the pulling chain so that -// they are not deleted. If not retained, they may be deleted by rmi. -func (graph *Graph) Retain(sessionID string, layerIDs ...string) { - graph.retained.Add(sessionID, layerIDs) -} - -// Release removes the referenced image ID from the provided set of layers. -func (graph *Graph) Release(sessionID string, layerIDs ...string) { - graph.retained.Delete(sessionID, layerIDs) -} - -// heads returns all heads in the graph, keyed by id. -// A head is an image which is not the parent of another image in the graph. -func (graph *Graph) heads() map[string]*image.Image { - heads := make(map[string]*image.Image) - graph.walkAll(func(image *image.Image) { - // if it has no children, then it's not a parent, so it's an head - if !graph.HasChildren(image.ID) { - heads[image.ID] = image - } - }) - return heads -} - -// tarLayer returns a tar archive of the image's filesystem layer. -func (graph *Graph) tarLayer(img *image.Image) (arch io.ReadCloser, err error) { - rdr, err := graph.assembleTarLayer(img) - if err != nil { - logrus.Debugf("[graph] tarLayer with traditional differ: %s", img.ID) - return graph.driver.Diff(img.ID, img.Parent) - } - return rdr, nil -} - -func (graph *Graph) imageRoot(id string) string { - return filepath.Join(graph.root, id) -} - -// loadImage fetches the image with the given id from the graph. 
-func (graph *Graph) loadImage(id string) (*image.Image, error) { - root := graph.imageRoot(id) - - // Open the JSON file to decode by streaming - jsonSource, err := os.Open(jsonPath(root)) - if err != nil { - return nil, err - } - defer jsonSource.Close() - - img := &image.Image{} - dec := json.NewDecoder(jsonSource) - - // Decode the JSON data - if err := dec.Decode(img); err != nil { - return nil, err - } - - if img.ID == "" { - img.ID = id - } - - if img.Parent == "" && img.ParentID != "" && img.ParentID.Validate() == nil { - img.Parent = img.ParentID.Hex() - } - - // compatibilityID for parent - parent, err := ioutil.ReadFile(filepath.Join(root, parentFileName)) - if err == nil && len(parent) > 0 { - img.Parent = string(parent) - } - - if err := image.ValidateID(img.ID); err != nil { - return nil, err - } - - if buf, err := ioutil.ReadFile(filepath.Join(root, layersizeFileName)); err != nil { - if !os.IsNotExist(err) { - return nil, err - } - // If the layersize file does not exist then set the size to a negative number - // because a layer size of 0 (zero) is valid - img.Size = -1 - } else { - // Using Atoi here instead would temporarily convert the size to a machine - // dependent integer type, which causes images larger than 2^31 bytes to - // display negative sizes on 32-bit machines: - size, err := strconv.ParseInt(string(buf), 10, 64) - if err != nil { - return nil, err - } - img.Size = int64(size) - } - - return img, nil -} - -// saveSize stores the `size` in the provided graph `img` directory `root`. -func (graph *Graph) saveSize(root string, size int64) error { - if err := ioutil.WriteFile(filepath.Join(root, layersizeFileName), []byte(strconv.FormatInt(size, 10)), 0600); err != nil { - return fmt.Errorf("Error storing image size in %s/%s: %s", root, layersizeFileName, err) - } - return nil -} - -// setLayerDigestWithLock sets the digest for the image layer to the provided value. 
-func (graph *Graph) setLayerDigestWithLock(id string, dgst digest.Digest) error { - graph.imageMutex.Lock(id) - defer graph.imageMutex.Unlock(id) - - return graph.setLayerDigest(id, dgst) -} -func (graph *Graph) setLayerDigest(id string, dgst digest.Digest) error { - root := graph.imageRoot(id) - if err := ioutil.WriteFile(filepath.Join(root, digestFileName), []byte(dgst.String()), 0600); err != nil { - return fmt.Errorf("Error storing digest in %s/%s: %s", root, digestFileName, err) - } - return nil -} - -// getLayerDigestWithLock gets the digest for the provide image layer id. -func (graph *Graph) getLayerDigestWithLock(id string) (digest.Digest, error) { - graph.imageMutex.Lock(id) - defer graph.imageMutex.Unlock(id) - - return graph.getLayerDigest(id) -} - -func (graph *Graph) getLayerDigest(id string) (digest.Digest, error) { - root := graph.imageRoot(id) - cs, err := ioutil.ReadFile(filepath.Join(root, digestFileName)) - if err != nil { - if os.IsNotExist(err) { - return "", errDigestNotSet - } - return "", err - } - return digest.ParseDigest(string(cs)) -} - -// setV1CompatibilityConfig stores the v1Compatibility JSON data associated -// with the image in the manifest to the disk -func (graph *Graph) setV1CompatibilityConfig(id string, data []byte) error { - root := graph.imageRoot(id) - return ioutil.WriteFile(filepath.Join(root, v1CompatibilityFileName), data, 0600) -} - -// getV1CompatibilityConfig reads the v1Compatibility JSON data for the image -// from the disk -func (graph *Graph) getV1CompatibilityConfig(id string) ([]byte, error) { - root := graph.imageRoot(id) - return ioutil.ReadFile(filepath.Join(root, v1CompatibilityFileName)) -} - -// generateV1CompatibilityChain makes sure v1Compatibility JSON data exists -// for the image. If it doesn't it generates and stores it for the image and -// all of it's parents based on the image config JSON. 
-func (graph *Graph) generateV1CompatibilityChain(id string) ([]byte, error) { - graph.imageMutex.Lock(id) - defer graph.imageMutex.Unlock(id) - - if v1config, err := graph.getV1CompatibilityConfig(id); err == nil { - return v1config, nil - } - - // generate new, store it to disk - img, err := graph.Get(id) - if err != nil { - return nil, err - } - - digestPrefix := string(digest.Canonical) + ":" - img.ID = strings.TrimPrefix(img.ID, digestPrefix) - - if img.Parent != "" { - parentConfig, err := graph.generateV1CompatibilityChain(img.Parent) - if err != nil { - return nil, err - } - var parent struct{ ID string } - err = json.Unmarshal(parentConfig, &parent) - if err != nil { - return nil, err - } - img.Parent = parent.ID - } - - json, err := json.Marshal(img) - if err != nil { - return nil, err - } - if err := graph.setV1CompatibilityConfig(id, json); err != nil { - return nil, err - } - return json, nil -} - -func jsonPath(root string) string { - return filepath.Join(root, jsonFileName) -} - -// storeImage stores file system layer data for the given image to the -// graph's storage driver. Image metadata is stored in a file -// at the specified root directory. -func (graph *Graph) storeImage(id, parent string, config []byte, layerData io.Reader, root string) (err error) { - var size int64 - // Store the layer. 
If layerData is not nil, unpack it into the new layer - if layerData != nil { - if size, err = graph.disassembleAndApplyTarLayer(id, parent, layerData, root); err != nil { - return err - } - } - - if err := graph.saveSize(root, size); err != nil { - return err - } - - if err := ioutil.WriteFile(jsonPath(root), config, 0600); err != nil { - return err - } - - // If image is pointing to a parent via CompatibilityID write the reference to disk - img, err := image.NewImgJSON(config) - if err != nil { - return err - } - - if (img.ParentID.Validate() == nil && parent != img.ParentID.Hex()) || (allowBaseParentImage && img.ParentID == "" && parent != "") { - // save compatibilityID parent if it doesn't match parentID - // on windows always save a parent file pointing to the base layer - if err := ioutil.WriteFile(filepath.Join(root, parentFileName), []byte(parent), 0600); err != nil { - return err - } - } - return nil -} - -func (graph *Graph) disassembleAndApplyTarLayer(id, parent string, layerData io.Reader, root string) (size int64, err error) { - var ar io.Reader - - if graph.tarSplitDisabled { - ar = layerData - } else { - // this is saving the tar-split metadata - mf, err := os.OpenFile(filepath.Join(root, tarDataFileName), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) - if err != nil { - return 0, err - } - - mfz := gzip.NewWriter(mf) - metaPacker := storage.NewJSONPacker(mfz) - defer mf.Close() - defer mfz.Close() - - inflatedLayerData, err := archive.DecompressStream(layerData) - if err != nil { - return 0, err - } - - // we're passing nil here for the file putter, because the ApplyDiff will - // handle the extraction of the archive - rdr, err := asm.NewInputTarStream(inflatedLayerData, metaPacker, nil) - if err != nil { - return 0, err - } - - ar = archive.Reader(rdr) - } - - if size, err = graph.driver.ApplyDiff(id, parent, ar); err != nil { - return 0, err - } - - return -} - -func (graph *Graph) assembleTarLayer(img *image.Image) (io.ReadCloser, 
error) { - root := graph.imageRoot(img.ID) - mFileName := filepath.Join(root, tarDataFileName) - mf, err := os.Open(mFileName) - if err != nil { - if !os.IsNotExist(err) { - logrus.Errorf("failed to open %q: %s", mFileName, err) - } - return nil, err - } - pR, pW := io.Pipe() - // this will need to be in a goroutine, as we are returning the stream of a - // tar archive, but can not close the metadata reader early (when this - // function returns)... - go func() { - defer mf.Close() - // let's reassemble! - logrus.Debugf("[graph] TarLayer with reassembly: %s", img.ID) - mfz, err := gzip.NewReader(mf) - if err != nil { - pW.CloseWithError(fmt.Errorf("[graph] error with %s: %s", mFileName, err)) - return - } - defer mfz.Close() - - // get our relative path to the container - fsLayer, err := graph.driver.Get(img.ID, "") - if err != nil { - pW.CloseWithError(err) - return - } - defer graph.driver.Put(img.ID) - - metaUnpacker := storage.NewJSONUnpacker(mfz) - fileGetter := storage.NewPathFileGetter(fsLayer) - logrus.Debugf("[graph] %s is at %q", img.ID, fsLayer) - ots := asm.NewOutputTarStream(fileGetter, metaUnpacker) - defer ots.Close() - if _, err := io.Copy(pW, ots); err != nil { - pW.CloseWithError(err) - return - } - pW.Close() - }() - return pR, nil -} diff --git a/graph/graph_test.go b/graph/graph_test.go deleted file mode 100644 index 3e0ba3c6c7..0000000000 --- a/graph/graph_test.go +++ /dev/null @@ -1,308 +0,0 @@ -package graph - -import ( - "errors" - "io" - "io/ioutil" - "os" - "path" - "testing" - "time" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/stringid" -) - -func TestMount(t *testing.T) { - graph, driver := tempGraph(t) - defer os.RemoveAll(graph.root) - defer driver.Cleanup() - - archive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - image, err := graph.Create(archive, "", "", "Testing", "", nil, nil) - if err != nil { - 
t.Fatal(err) - } - tmp, err := ioutil.TempDir("", "docker-test-graph-mount-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - rootfs := path.Join(tmp, "rootfs") - if err := os.MkdirAll(rootfs, 0700); err != nil { - t.Fatal(err) - } - rw := path.Join(tmp, "rw") - if err := os.MkdirAll(rw, 0700); err != nil { - t.Fatal(err) - } - - if _, err := driver.Get(image.ID, ""); err != nil { - t.Fatal(err) - } - -} - -func TestInit(t *testing.T) { - graph, _ := tempGraph(t) - defer nukeGraph(graph) - // Root should exist - if _, err := os.Stat(graph.root); err != nil { - t.Fatal(err) - } - // Map() should be empty - l := graph.Map() - if len(l) != 0 { - t.Fatalf("len(Map()) should return %d, not %d", 0, len(l)) - } -} - -// Test that Register can be interrupted cleanly without side effects -func TestInterruptedRegister(t *testing.T) { - graph, _ := tempGraph(t) - defer nukeGraph(graph) - badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data - image := &image.Image{ - ID: stringid.GenerateNonCryptoID(), - Comment: "testing", - Created: time.Now(), - } - w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling) - graph.Register(v1Descriptor{image}, badArchive) - if _, err := graph.Get(image.ID); err == nil { - t.Fatal("Image should not exist after Register is interrupted") - } - // Registering the same image again should succeed if the first register was interrupted - goodArchive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - if err := graph.Register(v1Descriptor{image}, goodArchive); err != nil { - t.Fatal(err) - } -} - -// FIXME: Do more extensive tests (ex: create multiple, delete, recreate; -// create multiple, check the amount of images and paths, etc..) 
-func TestGraphCreate(t *testing.T) { - graph, _ := tempGraph(t) - defer nukeGraph(graph) - archive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - img, err := graph.Create(archive, "", "", "Testing", "", nil, nil) - if err != nil { - t.Fatal(err) - } - if err := image.ValidateID(img.ID); err != nil { - t.Fatal(err) - } - if img.Comment != "Testing" { - t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", img.Comment) - } - if img.DockerVersion != dockerversion.Version { - t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.Version, img.DockerVersion) - } - images := graph.Map() - if l := len(images); l != 1 { - t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l) - } - if images[img.ID] == nil { - t.Fatalf("Could not find image with id %s", img.ID) - } -} - -func TestRegister(t *testing.T) { - graph, _ := tempGraph(t) - defer nukeGraph(graph) - archive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - image := &image.Image{ - ID: stringid.GenerateNonCryptoID(), - Comment: "testing", - Created: time.Now(), - } - err = graph.Register(v1Descriptor{image}, archive) - if err != nil { - t.Fatal(err) - } - images := graph.Map() - if l := len(images); l != 1 { - t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l) - } - if resultImg, err := graph.Get(image.ID); err != nil { - t.Fatal(err) - } else { - if resultImg.ID != image.ID { - t.Fatalf("Wrong image ID. Should be '%s', not '%s'", image.ID, resultImg.ID) - } - if resultImg.Comment != image.Comment { - t.Fatalf("Wrong image comment. 
Should be '%s', not '%s'", image.Comment, resultImg.Comment) - } - } -} - -// Test that an image can be deleted by its shorthand prefix -func TestDeletePrefix(t *testing.T) { - graph, _ := tempGraph(t) - defer nukeGraph(graph) - img := createTestImage(graph, t) - if err := graph.Delete(stringid.TruncateID(img.ID)); err != nil { - t.Fatal(err) - } - assertNImages(graph, t, 0) -} - -func TestDelete(t *testing.T) { - graph, _ := tempGraph(t) - defer nukeGraph(graph) - archive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - assertNImages(graph, t, 0) - img, err := graph.Create(archive, "", "", "Bla bla", "", nil, nil) - if err != nil { - t.Fatal(err) - } - assertNImages(graph, t, 1) - if err := graph.Delete(img.ID); err != nil { - t.Fatal(err) - } - assertNImages(graph, t, 0) - - archive, err = fakeTar() - if err != nil { - t.Fatal(err) - } - // Test 2 create (same name) / 1 delete - img1, err := graph.Create(archive, "", "", "Testing", "", nil, nil) - if err != nil { - t.Fatal(err) - } - archive, err = fakeTar() - if err != nil { - t.Fatal(err) - } - if _, err = graph.Create(archive, "", "", "Testing", "", nil, nil); err != nil { - t.Fatal(err) - } - assertNImages(graph, t, 2) - if err := graph.Delete(img1.ID); err != nil { - t.Fatal(err) - } - assertNImages(graph, t, 1) - - // Test delete wrong name - if err := graph.Delete("Not_foo"); err == nil { - t.Fatalf("Deleting wrong ID should return an error") - } - assertNImages(graph, t, 1) - - archive, err = fakeTar() - if err != nil { - t.Fatal(err) - } - // Test delete twice (pull -> rm -> pull -> rm) - if err := graph.Register(v1Descriptor{img1}, archive); err != nil { - t.Fatal(err) - } - if err := graph.Delete(img1.ID); err != nil { - t.Fatal(err) - } - assertNImages(graph, t, 1) -} - -func TestByParent(t *testing.T) { - archive1, _ := fakeTar() - archive2, _ := fakeTar() - archive3, _ := fakeTar() - - graph, _ := tempGraph(t) - defer nukeGraph(graph) - parentImage := &image.Image{ - ID: 
stringid.GenerateNonCryptoID(), - Comment: "parent", - Created: time.Now(), - Parent: "", - } - childImage1 := &image.Image{ - ID: stringid.GenerateNonCryptoID(), - Comment: "child1", - Created: time.Now(), - Parent: parentImage.ID, - } - childImage2 := &image.Image{ - ID: stringid.GenerateNonCryptoID(), - Comment: "child2", - Created: time.Now(), - Parent: parentImage.ID, - } - - err := graph.Register(v1Descriptor{parentImage}, archive1) - if err != nil { - t.Fatal(err) - } - err = graph.Register(v1Descriptor{childImage1}, archive2) - if err != nil { - t.Fatal(err) - } - err = graph.Register(v1Descriptor{childImage2}, archive3) - if err != nil { - t.Fatal(err) - } - - byParent := graph.ByParent() - numChildren := len(byParent[parentImage.ID]) - if numChildren != 2 { - t.Fatalf("Expected 2 children, found %d", numChildren) - } -} - -func createTestImage(graph *Graph, t *testing.T) *image.Image { - archive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - img, err := graph.Create(archive, "", "", "Test image", "", nil, nil) - if err != nil { - t.Fatal(err) - } - return img -} - -func assertNImages(graph *Graph, t *testing.T, n int) { - images := graph.Map() - if actualN := len(images); actualN != n { - t.Fatalf("Expected %d images, found %d", n, actualN) - } -} - -func tempGraph(t *testing.T) (*Graph, graphdriver.Driver) { - tmp, err := ioutil.TempDir("", "docker-graph-") - if err != nil { - t.Fatal(err) - } - driver, err := graphdriver.New(tmp, nil, nil, nil) - if err != nil { - t.Fatal(err) - } - graph, err := NewGraph(tmp, driver, nil, nil) - if err != nil { - t.Fatal(err) - } - return graph, driver -} - -func nukeGraph(graph *Graph) { - graph.driver.Cleanup() - os.RemoveAll(graph.root) -} diff --git a/graph/graph_unix.go b/graph/graph_unix.go deleted file mode 100644 index 19fbfbff97..0000000000 --- a/graph/graph_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package graph - -// allowBaseParentImage allows images to define a custom parent 
that is not -// transported with push/pull but already included with the installation. -// Only used in Windows. -const allowBaseParentImage = false diff --git a/graph/graph_windows.go b/graph/graph_windows.go deleted file mode 100644 index a9dfefc77f..0000000000 --- a/graph/graph_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build windows - -package graph - -// allowBaseParentImage allows images to define a custom parent that is not -// transported with push/pull but already included with the installation. -// Only used in Windows. -const allowBaseParentImage = true diff --git a/graph/history.go b/graph/history.go deleted file mode 100644 index de702987a4..0000000000 --- a/graph/history.go +++ /dev/null @@ -1,119 +0,0 @@ -package graph - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/image" - "github.com/docker/docker/utils" -) - -// walkHistory calls the handler function for each image in the -// provided images lineage starting from immediate parent. -func (graph *Graph) walkHistory(img *image.Image, handler func(image.Image) error) (err error) { - currentImg := img - for currentImg != nil { - if handler != nil { - if err := handler(*currentImg); err != nil { - return err - } - } - currentImg, err = graph.GetParent(currentImg) - if err != nil { - return fmt.Errorf("Error while getting parent image: %v", err) - } - } - return nil -} - -// depth returns the number of parents for the current image -func (graph *Graph) depth(img *image.Image) (int, error) { - var ( - count = 0 - parent = img - err error - ) - - for parent != nil { - count++ - if parent, err = graph.GetParent(parent); err != nil { - return -1, err - } - } - return count, nil -} - -// Set the max depth to the aufs default that most kernels are compiled with. 
-// For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk -const maxImageDepth = 127 - -// CheckDepth returns an error if the depth of an image, as returned -// by ImageDepth, is too large to support creating a container from it -// on this daemon. -func (graph *Graph) CheckDepth(img *image.Image) error { - // We add 2 layers to the depth because the container's rw and - // init layer add to the restriction - depth, err := graph.depth(img) - if err != nil { - return err - } - if depth+2 >= maxImageDepth { - return fmt.Errorf("Cannot create container with more than %d parents", maxImageDepth) - } - return nil -} - -// History returns a slice of ImageHistory structures for the specified image -// name by walking the image lineage. -func (s *TagStore) History(name string) ([]*types.ImageHistory, error) { - foundImage, err := s.LookupImage(name) - if err != nil { - return nil, err - } - - lookupMap := make(map[string][]string) - for name, repository := range s.Repositories { - for tag, id := range repository { - // If the ID already has a reverse lookup, do not update it unless for "latest" - if _, exists := lookupMap[id]; !exists { - lookupMap[id] = []string{} - } - lookupMap[id] = append(lookupMap[id], utils.ImageReference(name, tag)) - } - } - - history := []*types.ImageHistory{} - - err = s.graph.walkHistory(foundImage, func(img image.Image) error { - history = append(history, &types.ImageHistory{ - ID: img.ID, - Created: img.Created.Unix(), - CreatedBy: strings.Join(img.ContainerConfig.Cmd.Slice(), " "), - Tags: lookupMap[img.ID], - Size: img.Size, - Comment: img.Comment, - }) - return nil - }) - - return history, err -} - -// GetParent returns the parent image for the specified image. -func (graph *Graph) GetParent(img *image.Image) (*image.Image, error) { - if img.Parent == "" { - return nil, nil - } - return graph.Get(img.Parent) -} - -// getParentsSize returns the combined size of all parent images. 
If there is -// no parent image or it's unavailable, it returns 0. -func (graph *Graph) getParentsSize(img *image.Image) int64 { - parentImage, err := graph.GetParent(img) - if err != nil || parentImage == nil { - return 0 - } - return parentImage.Size + graph.getParentsSize(parentImage) -} diff --git a/graph/import.go b/graph/import.go deleted file mode 100644 index 180f205da4..0000000000 --- a/graph/import.go +++ /dev/null @@ -1,73 +0,0 @@ -package graph - -import ( - "io" - "net/http" - "net/url" - - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/progressreader" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/runconfig" -) - -// Import imports an image, getting the archived layer data either from -// inConfig (if src is "-"), or from a URI specified in src. Progress output is -// written to outStream. Repository and tag names can optionally be given in -// the repo and tag arguments, respectively. -func (s *TagStore) Import(src string, repo string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error { - var ( - sf = streamformatter.NewJSONStreamFormatter() - archive io.ReadCloser - resp *http.Response - ) - - if src == "-" { - archive = inConfig - } else { - inConfig.Close() - u, err := url.Parse(src) - if err != nil { - return err - } - if u.Scheme == "" { - u.Scheme = "http" - u.Host = src - u.Path = "" - } - outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) - resp, err = httputils.Download(u.String()) - if err != nil { - return err - } - progressReader := progressreader.New(progressreader.Config{ - In: resp.Body, - Out: outStream, - Formatter: sf, - Size: resp.ContentLength, - NewLines: true, - ID: "", - Action: "Importing", - }) - archive = progressReader - } - - defer archive.Close() - if len(msg) == 0 { - msg = "Imported from " + src - } - - img, err := s.graph.Create(archive, "", "", msg, "", nil, containerConfig) - if err != nil { 
- return err - } - // Optionally register the image at REPO/TAG - if repo != "" { - if err := s.Tag(repo, tag, img.ID, true); err != nil { - return err - } - } - outStream.Write(sf.FormatStatus("", img.ID)) - s.eventsService.Log("import", img.ID, "") - return nil -} diff --git a/graph/list.go b/graph/list.go deleted file mode 100644 index 675110ff61..0000000000 --- a/graph/list.go +++ /dev/null @@ -1,185 +0,0 @@ -package graph - -import ( - "fmt" - "path" - "sort" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/parsers/filters" - "github.com/docker/docker/utils" -) - -var acceptedImageFilterTags = map[string]struct{}{ - "dangling": {}, - "label": {}, -} - -// byCreated is a temporary type used to sort a list of images by creation -// time. -type byCreated []*types.Image - -func (r byCreated) Len() int { return len(r) } -func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } - -// Images returns a filtered list of images. filterArgs is a JSON-encoded set -// of filter arguments which will be interpreted by pkg/parsers/filters. -// filter is a shell glob string applied to repository names. The argument -// named all controls whether all images in the graph are filtered, or just -// the heads. 
-func (s *TagStore) Images(filterArgs, filter string, all bool) ([]*types.Image, error) { - var ( - allImages map[string]*image.Image - err error - filtTagged = true - filtLabel = false - ) - - imageFilters, err := filters.FromParam(filterArgs) - if err != nil { - return nil, err - } - for name := range imageFilters { - if _, ok := acceptedImageFilterTags[name]; !ok { - return nil, fmt.Errorf("Invalid filter '%s'", name) - } - } - - if i, ok := imageFilters["dangling"]; ok { - for _, value := range i { - if v := strings.ToLower(value); v == "true" { - filtTagged = false - } else if v != "false" { - return nil, fmt.Errorf("Invalid filter 'dangling=%s'", v) - } - } - } - - _, filtLabel = imageFilters["label"] - - if all && filtTagged { - allImages = s.graph.Map() - } else { - allImages = s.graph.heads() - } - - lookup := make(map[string]*types.Image) - s.Lock() - for repoName, repository := range s.Repositories { - filterTagName := "" - if filter != "" { - filterName := filter - // Test if the tag was in there, if yes, get the name - if strings.Contains(filterName, ":") { - filterWithTag := strings.Split(filter, ":") - filterName = filterWithTag[0] - filterTagName = filterWithTag[1] - } - if match, _ := path.Match(filterName, repoName); !match { - continue - } - if filterTagName != "" { - if _, ok := repository[filterTagName]; !ok { - continue - } - } - } - for ref, id := range repository { - imgRef := utils.ImageReference(repoName, ref) - if !strings.Contains(imgRef, filterTagName) { - continue - } - image, err := s.graph.Get(id) - if err != nil { - logrus.Warnf("couldn't load %s from %s: %s", id, imgRef, err) - continue - } - - if lImage, exists := lookup[id]; exists { - if filtTagged { - if utils.DigestReference(ref) { - lImage.RepoDigests = append(lImage.RepoDigests, imgRef) - } else { // Tag Ref. 
- lImage.RepoTags = append(lImage.RepoTags, imgRef) - } - } - } else { - // get the boolean list for if only the untagged images are requested - delete(allImages, id) - - if len(imageFilters["label"]) > 0 { - if image.Config == nil { - // Very old image that do not have image.Config (or even labels) - continue - } - // We are now sure image.Config is not nil - if !imageFilters.MatchKVList("label", image.Config.Labels) { - continue - } - } - if filtTagged { - newImage := newImage(image, s.graph.getParentsSize(image)) - - if utils.DigestReference(ref) { - newImage.RepoTags = []string{} - newImage.RepoDigests = []string{imgRef} - } else { - newImage.RepoTags = []string{imgRef} - newImage.RepoDigests = []string{} - } - - lookup[id] = newImage - } - } - - } - } - s.Unlock() - - images := []*types.Image{} - for _, value := range lookup { - images = append(images, value) - } - - // Display images which aren't part of a repository/tag - if filter == "" || filtLabel { - for _, image := range allImages { - if len(imageFilters["label"]) > 0 { - if image.Config == nil { - // Very old image that do not have image.Config (or even labels) - continue - } - // We are now sure image.Config is not nil - if !imageFilters.MatchKVList("label", image.Config.Labels) { - continue - } - } - newImage := newImage(image, s.graph.getParentsSize(image)) - newImage.RepoTags = []string{":"} - newImage.RepoDigests = []string{"@"} - - images = append(images, newImage) - } - } - - sort.Sort(sort.Reverse(byCreated(images))) - - return images, nil -} - -func newImage(image *image.Image, parentSize int64) *types.Image { - newImage := new(types.Image) - newImage.ParentID = image.Parent - newImage.ID = image.ID - newImage.Created = image.Created.Unix() - newImage.Size = image.Size - newImage.VirtualSize = parentSize + image.Size - if image.Config != nil { - newImage.Labels = image.Config.Labels - } - return newImage -} diff --git a/graph/load.go b/graph/load.go deleted file mode 100644 index 
3be3a8e815..0000000000 --- a/graph/load.go +++ /dev/null @@ -1,134 +0,0 @@ -// +build linux windows - -package graph - -import ( - "encoding/json" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" -) - -// Load uploads a set of images into the repository. This is the complementary of ImageExport. -// The input stream is an uncompressed tar ball containing images and metadata. -func (s *TagStore) Load(inTar io.ReadCloser, outStream io.Writer) error { - tmpImageDir, err := ioutil.TempDir("", "docker-import-") - if err != nil { - return err - } - defer os.RemoveAll(tmpImageDir) - - var ( - repoDir = filepath.Join(tmpImageDir, "repo") - ) - - if err := os.Mkdir(repoDir, os.ModeDir); err != nil { - return err - } - images := s.graph.Map() - excludes := make([]string, len(images)) - i := 0 - for k := range images { - excludes[i] = k - i++ - } - if err := chrootarchive.Untar(inTar, repoDir, &archive.TarOptions{ExcludePatterns: excludes}); err != nil { - return err - } - - dirs, err := ioutil.ReadDir(repoDir) - if err != nil { - return err - } - - for _, d := range dirs { - if d.IsDir() { - if err := s.recursiveLoad(d.Name(), tmpImageDir); err != nil { - return err - } - } - } - - reposJSONFile, err := os.Open(filepath.Join(tmpImageDir, "repo", "repositories")) - if err != nil { - if !os.IsNotExist(err) { - return err - } - return nil - } - defer reposJSONFile.Close() - - repositories := map[string]repository{} - if err := json.NewDecoder(reposJSONFile).Decode(&repositories); err != nil { - return err - } - - for imageName, tagMap := range repositories { - for tag, address := range tagMap { - if err := s.setLoad(imageName, tag, address, true, outStream); err != nil { - return err - } - } - } - - return nil -} - -func (s *TagStore) recursiveLoad(address, tmpImageDir string) error { - if _, err := s.LookupImage(address); err 
!= nil { - logrus.Debugf("Loading %s", address) - - imageJSON, err := ioutil.ReadFile(filepath.Join(tmpImageDir, "repo", address, "json")) - if err != nil { - logrus.Debugf("Error reading json: %v", err) - return err - } - - layer, err := os.Open(filepath.Join(tmpImageDir, "repo", address, "layer.tar")) - if err != nil { - logrus.Debugf("Error reading embedded tar: %v", err) - return err - } - img, err := image.NewImgJSON(imageJSON) - if err != nil { - logrus.Debugf("Error unmarshalling json: %v", err) - return err - } - if err := image.ValidateID(img.ID); err != nil { - logrus.Debugf("Error validating ID: %v", err) - return err - } - - // ensure no two downloads of the same layer happen at the same time - poolKey := "layer:" + img.ID - broadcaster, found := s.poolAdd("pull", poolKey) - if found { - logrus.Debugf("Image (id: %s) load is already running, waiting", img.ID) - return broadcaster.Wait() - } - - defer s.poolRemove("pull", poolKey) - - if img.Parent != "" { - if !s.graph.Exists(img.Parent) { - if err := s.recursiveLoad(img.Parent, tmpImageDir); err != nil { - return err - } - } - } - if err := s.graph.Register(v1Descriptor{img}, layer); err != nil { - return err - } - logrus.Debugf("Completed processing %s", address) - return nil - } - logrus.Debugf("already loaded %s", address) - - return nil -} diff --git a/graph/load_unsupported.go b/graph/load_unsupported.go deleted file mode 100644 index 7154931f29..0000000000 --- a/graph/load_unsupported.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !linux,!windows - -package graph - -import ( - "fmt" - "io" -) - -// Load method is implemented here for non-linux and non-windows platforms and -// may return an error indicating that image load is not supported on other platforms. 
-func (s *TagStore) Load(inTar io.ReadCloser, outStream io.Writer) error { - return fmt.Errorf("Load is not supported on this platform") -} diff --git a/graph/pools_test.go b/graph/pools_test.go deleted file mode 100644 index 6382c15596..0000000000 --- a/graph/pools_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package graph - -import ( - "testing" - - "github.com/docker/docker/pkg/broadcaster" - "github.com/docker/docker/pkg/reexec" -) - -func init() { - reexec.Init() -} - -func TestPools(t *testing.T) { - s := &TagStore{ - pullingPool: make(map[string]*broadcaster.Buffered), - pushingPool: make(map[string]*broadcaster.Buffered), - } - - if _, found := s.poolAdd("pull", "test1"); found { - t.Fatal("Expected pull test1 not to be in progress") - } - if _, found := s.poolAdd("pull", "test2"); found { - t.Fatal("Expected pull test2 not to be in progress") - } - if _, found := s.poolAdd("push", "test1"); !found { - t.Fatalf("Expected pull test1 to be in progress`") - } - if _, found := s.poolAdd("pull", "test1"); !found { - t.Fatalf("Expected pull test1 to be in progress`") - } - if err := s.poolRemove("pull", "test2"); err != nil { - t.Fatal(err) - } - if err := s.poolRemove("pull", "test2"); err != nil { - t.Fatal(err) - } - if err := s.poolRemove("pull", "test1"); err != nil { - t.Fatal(err) - } - if err := s.poolRemove("push", "test1"); err != nil { - t.Fatal(err) - } -} diff --git a/graph/pull.go b/graph/pull.go deleted file mode 100644 index 8cee94886b..0000000000 --- a/graph/pull.go +++ /dev/null @@ -1,157 +0,0 @@ -package graph - -import ( - "fmt" - "io" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/registry" - "github.com/docker/docker/utils" -) - -// ImagePullConfig stores pull configuration. -type ImagePullConfig struct { - // MetaHeaders stores HTTP headers with metadata about the image - // (DockerHeaders with prefix X-Meta- in the request). 
- MetaHeaders map[string][]string - // AuthConfig holds authentication credentials for authenticating with - // the registry. - AuthConfig *cliconfig.AuthConfig - // OutStream is the output writer for showing the status of the pull - // operation. - OutStream io.Writer -} - -// puller is an interface that abstracts pulling for different API versions. -type puller interface { - // Pull tries to pull the image referenced by `tag` - // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. - // - // TODO(tiborvass): have Pull() take a reference to repository + tag, so that the puller itself is repository-agnostic. - Pull(tag string) (fallback bool, err error) -} - -// newPuller returns a Puller interface that will pull from either a v1 or v2 -// registry. The endpoint argument contains a Version field that determines -// whether a v1 or v2 puller will be created. The other parameters are passed -// through to the underlying puller implementation for use during the actual -// pull operation. -func newPuller(s *TagStore, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig, sf *streamformatter.StreamFormatter) (puller, error) { - switch endpoint.Version { - case registry.APIVersion2: - return &v2Puller{ - TagStore: s, - endpoint: endpoint, - config: imagePullConfig, - sf: sf, - repoInfo: repoInfo, - }, nil - case registry.APIVersion1: - return &v1Puller{ - TagStore: s, - endpoint: endpoint, - config: imagePullConfig, - sf: sf, - repoInfo: repoInfo, - }, nil - } - return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) -} - -// Pull initiates a pull operation. image is the repository name to pull, and -// tag may be either empty, or indicate a specific tag to pull. 
-func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConfig) error { - var sf = streamformatter.NewJSONStreamFormatter() - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := s.registryService.ResolveRepository(image) - if err != nil { - return err - } - - // makes sure name is not empty or `scratch` - if err := validateRepoName(repoInfo.LocalName); err != nil { - return err - } - - endpoints, err := s.registryService.LookupPullEndpoints(repoInfo.CanonicalName) - if err != nil { - return err - } - - logName := repoInfo.LocalName - if tag != "" { - logName = utils.ImageReference(logName, tag) - } - - var ( - // use a slice to append the error strings and return a joined string to caller - errors []string - - // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport - // By default it is false, which means that if a ErrNoSupport error is encountered, it will be saved in errors. - // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of - // any subsequent ErrNoSupport errors in errors. - // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be - // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant - // error is the ones from v2 endpoints not v1. - discardNoSupportErrors bool - ) - for _, endpoint := range endpoints { - logrus.Debugf("Trying to pull %s from %s %s", repoInfo.LocalName, endpoint.URL, endpoint.Version) - - puller, err := newPuller(s, endpoint, repoInfo, imagePullConfig, sf) - if err != nil { - errors = append(errors, err.Error()) - continue - } - if fallback, err := puller.Pull(tag); err != nil { - if fallback { - if _, ok := err.(registry.ErrNoSupport); !ok { - // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. 
- discardNoSupportErrors = true - // append subsequent errors - errors = append(errors, err.Error()) - } else if !discardNoSupportErrors { - // Save the ErrNoSupport error, because it's either the first error or all encountered errors - // were also ErrNoSupport errors. - // append subsequent errors - errors = append(errors, err.Error()) - } - continue - } - errors = append(errors, err.Error()) - logrus.Debugf("Not continuing with error: %v", fmt.Errorf(strings.Join(errors, "\n"))) - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) - } - } - - s.eventsService.Log("pull", logName, "") - return nil - } - - if len(errors) == 0 { - return fmt.Errorf("no endpoints found for %s", image) - } - - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) - } - return nil -} - -// writeStatus writes a status message to out. If layersDownloaded is true, the -// status message indicates that a newer image was downloaded. Otherwise, it -// indicates that the image is up to date. requestedTag is the tag the message -// will refer to. 
-func writeStatus(requestedTag string, out io.Writer, sf *streamformatter.StreamFormatter, layersDownloaded bool) { - if layersDownloaded { - out.Write(sf.FormatStatus("", "Status: Downloaded newer image for %s", requestedTag)) - } else { - out.Write(sf.FormatStatus("", "Status: Image is up to date for %s", requestedTag)) - } -} diff --git a/graph/pull_v1.go b/graph/pull_v1.go deleted file mode 100644 index 51150dbee9..0000000000 --- a/graph/pull_v1.go +++ /dev/null @@ -1,350 +0,0 @@ -package graph - -import ( - "errors" - "fmt" - "io" - "net" - "net/url" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/progressreader" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/registry" - "github.com/docker/docker/utils" -) - -type v1Puller struct { - *TagStore - endpoint registry.APIEndpoint - config *ImagePullConfig - sf *streamformatter.StreamFormatter - repoInfo *registry.RepositoryInfo - session *registry.Session -} - -func (p *v1Puller) Pull(tag string) (fallback bool, err error) { - if utils.DigestReference(tag) { - // Allowing fallback, because HTTPS v1 is before HTTP v2 - return true, registry.ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")} - } - - tlsConfig, err := p.registryService.TLSConfig(p.repoInfo.Index.Name) - if err != nil { - return false, err - } - // Adds Docker-specific headers as well as user-specified headers (metaHeaders) - tr := transport.NewTransport( - // TODO(tiborvass): was ReceiveTimeout - registry.NewTransport(tlsConfig), - registry.DockerHeaders(p.config.MetaHeaders)..., - ) - client := registry.HTTPClient(tr) - v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders) - if err != nil { - logrus.Debugf("Could not get v1 endpoint: %v", err) - return true, err - } - p.session, err = registry.NewSession(client, 
p.config.AuthConfig, v1Endpoint) - if err != nil { - // TODO(dmcgowan): Check if should fallback - logrus.Debugf("Fallback from error: %s", err) - return true, err - } - if err := p.pullRepository(tag); err != nil { - // TODO(dmcgowan): Check if should fallback - return false, err - } - out := p.config.OutStream - out.Write(p.sf.FormatStatus("", "%s: this image was pulled from a legacy registry. Important: This registry version will not be supported in future versions of docker.", p.repoInfo.CanonicalName)) - - return false, nil -} - -func (p *v1Puller) pullRepository(askedTag string) error { - out := p.config.OutStream - out.Write(p.sf.FormatStatus("", "Pulling repository %s", p.repoInfo.CanonicalName)) - - repoData, err := p.session.GetRepositoryData(p.repoInfo.RemoteName) - if err != nil { - if strings.Contains(err.Error(), "HTTP code: 404") { - return fmt.Errorf("Error: image %s not found", utils.ImageReference(p.repoInfo.RemoteName, askedTag)) - } - // Unexpected HTTP error - return err - } - - logrus.Debugf("Retrieving the tag list") - tagsList := make(map[string]string) - if askedTag == "" { - tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.RemoteName) - } else { - var tagID string - tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.RemoteName, askedTag) - tagsList[askedTag] = tagID - } - if err != nil { - if err == registry.ErrRepoNotFound && askedTag != "" { - return fmt.Errorf("Tag %s not found in repository %s", askedTag, p.repoInfo.CanonicalName) - } - logrus.Errorf("unable to get remote tags: %s", err) - return err - } - - for tag, id := range tagsList { - repoData.ImgList[id] = ®istry.ImgData{ - ID: id, - Tag: tag, - Checksum: "", - } - } - - logrus.Debugf("Registering tags") - // If no tag has been specified, pull them all - if askedTag == "" { - for tag, id := range tagsList { - repoData.ImgList[id].Tag = tag - } - } else { - // Otherwise, check that the tag exists and use only that one - id, exists := 
tagsList[askedTag] - if !exists { - return fmt.Errorf("Tag %s not found in repository %s", askedTag, p.repoInfo.CanonicalName) - } - repoData.ImgList[id].Tag = askedTag - } - - errors := make(chan error) - - layersDownloaded := false - imgIDs := []string{} - sessionID := p.session.ID() - defer func() { - p.graph.Release(sessionID, imgIDs...) - }() - for _, imgData := range repoData.ImgList { - downloadImage := func(img *registry.ImgData) { - if askedTag != "" && img.Tag != askedTag { - errors <- nil - return - } - - if img.Tag == "" { - logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) - errors <- nil - return - } - - if err := image.ValidateID(img.ID); err != nil { - errors <- err - return - } - - // ensure no two downloads of the same image happen at the same time - poolKey := "img:" + img.ID - broadcaster, found := p.poolAdd("pull", poolKey) - broadcaster.Add(out) - if found { - errors <- broadcaster.Wait() - return - } - defer p.poolRemove("pull", poolKey) - - // we need to retain it until tagging - p.graph.Retain(sessionID, img.ID) - imgIDs = append(imgIDs, img.ID) - - broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, p.repoInfo.CanonicalName), nil)) - success := false - var lastErr, err error - var isDownloaded bool - for _, ep := range p.repoInfo.Index.Mirrors { - ep += "v1/" - broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil)) - if isDownloaded, err = p.pullImage(broadcaster, img.ID, ep); err != nil { - // Don't report errors when pulling from mirrors. 
- logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err) - continue - } - layersDownloaded = layersDownloaded || isDownloaded - success = true - break - } - if !success { - for _, ep := range repoData.Endpoints { - broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.CanonicalName, ep), nil)) - if isDownloaded, err = p.pullImage(broadcaster, img.ID, ep); err != nil { - // It's not ideal that only the last error is returned, it would be better to concatenate the errors. - // As the error is also given to the output stream the user will see the error. - lastErr = err - broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.CanonicalName, ep, err), nil)) - continue - } - layersDownloaded = layersDownloaded || isDownloaded - success = true - break - } - } - if !success { - err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.CanonicalName, lastErr) - broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), err.Error(), nil)) - errors <- err - broadcaster.CloseWithError(err) - return - } - broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil)) - - errors <- nil - } - - go downloadImage(imgData) - } - - var lastError error - for i := 0; i < len(repoData.ImgList); i++ { - if err := <-errors; err != nil { - lastError = err - } - } - if lastError != nil { - return lastError - } - - for tag, id := range tagsList { - if askedTag != "" && tag != askedTag { - continue - } - if err := p.Tag(p.repoInfo.LocalName, tag, id, true); err != nil { - return err - } - } - - requestedTag := p.repoInfo.LocalName - if len(askedTag) > 0 { - requestedTag = utils.ImageReference(p.repoInfo.LocalName, askedTag) - } - writeStatus(requestedTag, out, p.sf, layersDownloaded) - return nil 
-} - -func (p *v1Puller) pullImage(out io.Writer, imgID, endpoint string) (layersDownloaded bool, err error) { - var history []string - history, err = p.session.GetRemoteHistory(imgID, endpoint) - if err != nil { - return false, err - } - out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil)) - // FIXME: Try to stream the images? - // FIXME: Launch the getRemoteImage() in goroutines - - sessionID := p.session.ID() - // As imgID has been retained in pullRepository, no need to retain again - p.graph.Retain(sessionID, history[1:]...) - defer p.graph.Release(sessionID, history[1:]...) - - layersDownloaded = false - for i := len(history) - 1; i >= 0; i-- { - id := history[i] - - // ensure no two downloads of the same layer happen at the same time - poolKey := "layer:" + id - broadcaster, found := p.poolAdd("pull", poolKey) - broadcaster.Add(out) - if found { - logrus.Debugf("Image (id: %s) pull is already running, skipping", id) - err = broadcaster.Wait() - if err != nil { - return layersDownloaded, err - } - continue - } - - // This must use a closure so it captures the value of err when - // the function returns, not when the 'defer' is evaluated. 
- defer func() { - p.poolRemoveWithError("pull", poolKey, err) - }() - - if !p.graph.Exists(id) { - broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil)) - var ( - imgJSON []byte - imgSize int64 - err error - img *image.Image - ) - retries := 5 - for j := 1; j <= retries; j++ { - imgJSON, imgSize, err = p.session.GetRemoteImageJSON(id, endpoint) - if err != nil && j == retries { - broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil)) - return layersDownloaded, err - } else if err != nil { - time.Sleep(time.Duration(j) * 500 * time.Millisecond) - continue - } - img, err = image.NewImgJSON(imgJSON) - layersDownloaded = true - if err != nil && j == retries { - broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil)) - return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err) - } else if err != nil { - time.Sleep(time.Duration(j) * 500 * time.Millisecond) - continue - } else { - break - } - } - - for j := 1; j <= retries; j++ { - // Get the layer - status := "Pulling fs layer" - if j > 1 { - status = fmt.Sprintf("Pulling fs layer [retries: %d]", j) - } - broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil)) - layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, imgSize) - if uerr, ok := err.(*url.Error); ok { - err = uerr.Err - } - if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { - time.Sleep(time.Duration(j) * 500 * time.Millisecond) - continue - } else if err != nil { - broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil)) - return layersDownloaded, err - } - layersDownloaded = true - defer layer.Close() - - err = p.graph.Register(v1Descriptor{img}, - progressreader.New(progressreader.Config{ - In: layer, - Out: broadcaster, - Formatter: p.sf, - Size: imgSize, - NewLines: false, - ID: stringid.TruncateID(id), - Action: 
"Downloading", - })) - if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { - time.Sleep(time.Duration(j) * 500 * time.Millisecond) - continue - } else if err != nil { - broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil)) - return layersDownloaded, err - } else { - break - } - } - } - broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil)) - broadcaster.Close() - } - return layersDownloaded, nil -} diff --git a/graph/pull_v2.go b/graph/pull_v2.go deleted file mode 100644 index 4ccc6c7196..0000000000 --- a/graph/pull_v2.go +++ /dev/null @@ -1,728 +0,0 @@ -package graph - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/broadcaster" - "github.com/docker/docker/pkg/progressreader" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/registry" - "github.com/docker/docker/utils" - "golang.org/x/net/context" -) - -type v2Puller struct { - *TagStore - endpoint registry.APIEndpoint - config *ImagePullConfig - sf *streamformatter.StreamFormatter - repoInfo *registry.RepositoryInfo - repo distribution.Repository - sessionID string -} - -func (p *v2Puller) Pull(tag string) (fallback bool, err error) { - // TODO(tiborvass): was ReceiveTimeout - p.repo, err = newV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") - if err != nil { - logrus.Warnf("Error getting v2 registry: %v", err) - return true, err - } - - p.sessionID = stringid.GenerateRandomID() - - if err := p.pullV2Repository(tag); err != nil { - if registry.ContinueOnError(err) { - logrus.Debugf("Error trying v2 registry: %v", err) - return true, err - } 
- return false, err - } - return false, nil -} - -func (p *v2Puller) pullV2Repository(tag string) (err error) { - var tags []string - taggedName := p.repoInfo.LocalName - if len(tag) > 0 { - tags = []string{tag} - taggedName = utils.ImageReference(p.repoInfo.LocalName, tag) - } else { - var err error - - manSvc, err := p.repo.Manifests(context.Background()) - if err != nil { - return err - } - - tags, err = manSvc.Tags() - if err != nil { - return err - } - - } - - poolKey := "v2:" + taggedName - broadcaster, found := p.poolAdd("pull", poolKey) - broadcaster.Add(p.config.OutStream) - if found { - // Another pull of the same repository is already taking place; just wait for it to finish - return broadcaster.Wait() - } - - // This must use a closure so it captures the value of err when the - // function returns, not when the 'defer' is evaluated. - defer func() { - p.poolRemoveWithError("pull", poolKey, err) - }() - - var layersDownloaded bool - for _, tag := range tags { - // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged - // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? - pulledNew, err := p.pullV2Tag(broadcaster, tag, taggedName) - if err != nil { - return err - } - layersDownloaded = layersDownloaded || pulledNew - } - - writeStatus(taggedName, broadcaster, p.sf, layersDownloaded) - - return nil -} - -// downloadInfo is used to pass information from download to extractor -type downloadInfo struct { - img contentAddressableDescriptor - imgIndex int - tmpFile *os.File - digest digest.Digest - layer distribution.ReadSeekCloser - size int64 - err chan error - poolKey string - broadcaster *broadcaster.Buffered -} - -// contentAddressableDescriptor is used to pass image data from a manifest to the -// graph. 
-type contentAddressableDescriptor struct { - id string - parent string - strongID digest.Digest - compatibilityID string - config []byte - v1Compatibility []byte -} - -func newContentAddressableImage(v1Compatibility []byte, blobSum digest.Digest, parent digest.Digest) (contentAddressableDescriptor, error) { - img := contentAddressableDescriptor{ - v1Compatibility: v1Compatibility, - } - - var err error - img.config, err = image.MakeImageConfig(v1Compatibility, blobSum, parent) - if err != nil { - return img, err - } - img.strongID, err = image.StrongID(img.config) - if err != nil { - return img, err - } - - unmarshalledConfig, err := image.NewImgJSON(v1Compatibility) - if err != nil { - return img, err - } - - img.compatibilityID = unmarshalledConfig.ID - img.id = img.strongID.Hex() - - return img, nil -} - -// ID returns the actual ID to be used for the downloaded image. This may be -// a computed ID. -func (img contentAddressableDescriptor) ID() string { - return img.id -} - -// Parent returns the parent ID to be used for the image. This may be a -// computed ID. -func (img contentAddressableDescriptor) Parent() string { - return img.parent -} - -// MarshalConfig renders the image structure into JSON. 
-func (img contentAddressableDescriptor) MarshalConfig() ([]byte, error) { - return img.config, nil -} - -type errVerification struct{} - -func (errVerification) Error() string { return "verification failed" } - -func (p *v2Puller) download(di *downloadInfo) { - logrus.Debugf("pulling blob %q to %s", di.digest, di.img.id) - - blobs := p.repo.Blobs(context.Background()) - - desc, err := blobs.Stat(context.Background(), di.digest) - if err != nil { - logrus.Debugf("Error statting layer: %v", err) - di.err <- err - return - } - di.size = desc.Size - - layerDownload, err := blobs.Open(context.Background(), di.digest) - if err != nil { - logrus.Debugf("Error fetching layer: %v", err) - di.err <- err - return - } - defer layerDownload.Close() - - verifier, err := digest.NewDigestVerifier(di.digest) - if err != nil { - di.err <- err - return - } - - reader := progressreader.New(progressreader.Config{ - In: ioutil.NopCloser(io.TeeReader(layerDownload, verifier)), - Out: di.broadcaster, - Formatter: p.sf, - Size: di.size, - NewLines: false, - ID: stringid.TruncateID(di.img.id), - Action: "Downloading", - }) - io.Copy(di.tmpFile, reader) - - di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.id), "Verifying Checksum", nil)) - - if !verifier.Verified() { - err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest) - logrus.Error(err) - di.err <- err - return - } - - di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.id), "Download complete", nil)) - - logrus.Debugf("Downloaded %s to tempfile %s", di.img.id, di.tmpFile.Name()) - di.layer = layerDownload - - di.err <- nil -} - -func (p *v2Puller) pullV2Tag(out io.Writer, tag, taggedName string) (tagUpdated bool, err error) { - logrus.Debugf("Pulling tag from V2 registry: %q", tag) - - manSvc, err := p.repo.Manifests(context.Background()) - if err != nil { - return false, err - } - - unverifiedManifest, err := manSvc.GetByTag(tag) - if err != nil { - return false, err - } 
- if unverifiedManifest == nil { - return false, fmt.Errorf("image manifest does not exist for tag %q", tag) - } - var verifiedManifest *schema1.Manifest - verifiedManifest, err = verifyManifest(unverifiedManifest, tag) - if err != nil { - return false, err - } - - // remove duplicate layers and check parent chain validity - err = fixManifestLayers(verifiedManifest) - if err != nil { - return false, err - } - - imgs, err := p.getImageInfos(verifiedManifest) - if err != nil { - return false, err - } - - out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name())) - - var downloads []*downloadInfo - - var layerIDs []string - defer func() { - p.graph.Release(p.sessionID, layerIDs...) - - for _, d := range downloads { - p.poolRemoveWithError("pull", d.poolKey, err) - if d.tmpFile != nil { - d.tmpFile.Close() - if err := os.RemoveAll(d.tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name()) - } - } - } - }() - - for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { - img := imgs[i] - - p.graph.Retain(p.sessionID, img.id) - layerIDs = append(layerIDs, img.id) - - p.graph.imageMutex.Lock(img.id) - - // Check if exists - if p.graph.Exists(img.id) { - if err := p.validateImageInGraph(img.id, imgs, i); err != nil { - p.graph.imageMutex.Unlock(img.id) - return false, fmt.Errorf("image validation failed: %v", err) - } - logrus.Debugf("Image already exists: %s", img.id) - p.graph.imageMutex.Unlock(img.id) - continue - } - p.graph.imageMutex.Unlock(img.id) - - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.id), "Pulling fs layer", nil)) - - d := &downloadInfo{ - img: img, - imgIndex: i, - poolKey: "v2layer:" + img.id, - digest: verifiedManifest.FSLayers[i].BlobSum, - // TODO: seems like this chan buffer solved hanging problem in go1.5, - // this can indicate some deeper problem that somehow we never take - // error from channel in loop below - err: make(chan error, 1), - } - - tmpFile, err := ioutil.TempFile("", 
"GetImageBlob") - if err != nil { - return false, err - } - d.tmpFile = tmpFile - - downloads = append(downloads, d) - - broadcaster, found := p.poolAdd("pull", d.poolKey) - broadcaster.Add(out) - d.broadcaster = broadcaster - if found { - d.err <- nil - } else { - go p.download(d) - } - } - - for _, d := range downloads { - if err := <-d.err; err != nil { - return false, err - } - - if d.layer == nil { - // Wait for a different pull to download and extract - // this layer. - err = d.broadcaster.Wait() - if err != nil { - return false, err - } - continue - } - - d.tmpFile.Seek(0, 0) - err := func() error { - reader := progressreader.New(progressreader.Config{ - In: d.tmpFile, - Out: d.broadcaster, - Formatter: p.sf, - Size: d.size, - NewLines: false, - ID: stringid.TruncateID(d.img.id), - Action: "Extracting", - }) - - p.graph.imagesMutex.Lock() - defer p.graph.imagesMutex.Unlock() - - p.graph.imageMutex.Lock(d.img.id) - defer p.graph.imageMutex.Unlock(d.img.id) - - // Must recheck the data on disk if any exists. - // This protects against races where something - // else is written to the graph under this ID - // after attemptIDReuse. 
- if p.graph.Exists(d.img.id) { - if err := p.validateImageInGraph(d.img.id, imgs, d.imgIndex); err != nil { - return fmt.Errorf("image validation failed: %v", err) - } - } - - if err := p.graph.register(d.img, reader); err != nil { - return err - } - - if err := p.graph.setLayerDigest(d.img.id, d.digest); err != nil { - return err - } - - if err := p.graph.setV1CompatibilityConfig(d.img.id, d.img.v1Compatibility); err != nil { - return err - } - - return nil - }() - if err != nil { - return false, err - } - - d.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.id), "Pull complete", nil)) - d.broadcaster.Close() - tagUpdated = true - } - - manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo.LocalName) - if err != nil { - return false, err - } - - // Check for new tag if no layers downloaded - if !tagUpdated { - repo, err := p.get(p.repoInfo.LocalName) - if err != nil { - return false, err - } - if repo != nil { - if _, exists := repo[tag]; !exists { - tagUpdated = true - } - } else { - tagUpdated = true - } - } - - firstID := layerIDs[len(layerIDs)-1] - if utils.DigestReference(tag) { - // TODO(stevvooe): Ideally, we should always set the digest so we can - // use the digest whether we pull by it or not. Unfortunately, the tag - // store treats the digest as a separate tag, meaning there may be an - // untagged digest image that would seem to be dangling by a user. - if err = p.setDigest(p.repoInfo.LocalName, tag, firstID); err != nil { - return false, err - } - } else { - // only set the repository/tag -> image ID mapping when pulling by tag (i.e. 
not by digest) - if err = p.Tag(p.repoInfo.LocalName, tag, firstID, true); err != nil { - return false, err - } - } - - if manifestDigest != "" { - out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest)) - } - - return tagUpdated, nil -} - -func verifyManifest(signedManifest *schema1.SignedManifest, tag string) (m *schema1.Manifest, err error) { - // If pull by digest, then verify the manifest digest. NOTE: It is - // important to do this first, before any other content validation. If the - // digest cannot be verified, don't even bother with those other things. - if manifestDigest, err := digest.ParseDigest(tag); err == nil { - verifier, err := digest.NewDigestVerifier(manifestDigest) - if err != nil { - return nil, err - } - payload, err := signedManifest.Payload() - if err != nil { - // If this failed, the signatures section was corrupted - // or missing. Treat the entire manifest as the payload. - payload = signedManifest.Raw - } - if _, err := verifier.Write(payload); err != nil { - return nil, err - } - if !verifier.Verified() { - err := fmt.Errorf("image verification failed for digest %s", manifestDigest) - logrus.Error(err) - return nil, err - } - - var verifiedManifest schema1.Manifest - if err = json.Unmarshal(payload, &verifiedManifest); err != nil { - return nil, err - } - m = &verifiedManifest - } else { - m = &signedManifest.Manifest - } - - if m.SchemaVersion != 1 { - return nil, fmt.Errorf("unsupported schema version %d for tag %q", m.SchemaVersion, tag) - } - if len(m.FSLayers) != len(m.History) { - return nil, fmt.Errorf("length of history not equal to number of layers for tag %q", tag) - } - if len(m.FSLayers) == 0 { - return nil, fmt.Errorf("no FSLayers in manifest for tag %q", tag) - } - return m, nil -} - -// fixManifestLayers removes repeated layers from the manifest and checks the -// correctness of the parent chain. 
-func fixManifestLayers(m *schema1.Manifest) error { - images := make([]*image.Image, len(m.FSLayers)) - for i := range m.FSLayers { - img, err := image.NewImgJSON([]byte(m.History[i].V1Compatibility)) - if err != nil { - return err - } - images[i] = img - if err := image.ValidateID(img.ID); err != nil { - return err - } - } - - if images[len(images)-1].Parent != "" && !allowBaseParentImage { - // Windows base layer can point to a base layer parent that is not in manifest. - return errors.New("Invalid parent ID in the base layer of the image.") - } - - // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) - - var lastID string - for _, img := range images { - // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { - return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap[lastID] = struct{}{} - } - - // backwards loop so that we keep the remaining indexes after removing items - for i := len(images) - 2; i >= 0; i-- { - if images[i].ID == images[i+1].ID { // repeated ID. remove and continue - m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) - m.History = append(m.History[:i], m.History[i+1:]...) - } else if images[i].Parent != images[i+1].ID { - return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", images[i+1].ID, images[i].Parent) - } - } - - return nil -} - -// getImageInfos returns an imageinfo struct for every image in the manifest. -// These objects contain both calculated strongIDs and compatibilityIDs found -// in v1Compatibility object. 
-func (p *v2Puller) getImageInfos(m *schema1.Manifest) ([]contentAddressableDescriptor, error) { - imgs := make([]contentAddressableDescriptor, len(m.FSLayers)) - - var parent digest.Digest - for i := len(imgs) - 1; i >= 0; i-- { - var err error - imgs[i], err = newContentAddressableImage([]byte(m.History[i].V1Compatibility), m.FSLayers[i].BlobSum, parent) - if err != nil { - return nil, err - } - parent = imgs[i].strongID - } - - p.attemptIDReuse(imgs) - - // reset the base layer parent for windows - if allowBaseParentImage { - var base struct{ Parent string } - if err := json.Unmarshal(imgs[len(imgs)-1].v1Compatibility, &base); err != nil { - return nil, err - } - if base.Parent != "" { - imgs[len(imgs)-1].parent = base.Parent - } - } - - return imgs, nil -} - -// attemptIDReuse does a best attempt to match verified compatibilityIDs -// already in the graph with the computed strongIDs so we can keep using them. -// This process will never fail but may just return the strongIDs if none of -// the compatibilityIDs exists or can be verified. If the strongIDs themselves -// fail verification, we deterministically generate alternate IDs to use until -// we find one that's available or already exists with the correct data. -func (p *v2Puller) attemptIDReuse(imgs []contentAddressableDescriptor) { - // This function needs to be protected with a global lock, because it - // locks multiple IDs at once, and there's no good way to make sure - // the locking happens a deterministic order. 
- p.graph.imagesMutex.Lock() - defer p.graph.imagesMutex.Unlock() - - idMap := make(map[string]struct{}) - for _, img := range imgs { - idMap[img.id] = struct{}{} - idMap[img.compatibilityID] = struct{}{} - - if p.graph.Exists(img.compatibilityID) { - if _, err := p.graph.generateV1CompatibilityChain(img.compatibilityID); err != nil { - logrus.Debugf("Migration v1Compatibility generation error: %v", err) - return - } - } - } - for id := range idMap { - p.graph.imageMutex.Lock(id) - defer p.graph.imageMutex.Unlock(id) - } - - // continueReuse controls whether the function will try to find - // existing layers on disk under the old v1 IDs, to avoid repulling - // them. The hashes are checked to ensure these layers are okay to - // use. continueReuse starts out as true, but is set to false if - // the code encounters something that doesn't match the expected hash. - continueReuse := true - - for i := len(imgs) - 1; i >= 0; i-- { - // TODO - (swernli:11-16-2015) Skipping content addressable IDs on - // Windows as a hack for TP4 compat. The correct fix is to ensure that - // Windows layers do not have anything in them that takes a dependency - // on the ID of the layer in the management client. This will be fixed - // in Windows post-TP4. - if runtime.GOOS == "windows" { - imgs[i].id = imgs[i].compatibilityID - } - - if p.graph.Exists(imgs[i].id) { - // Found an image in the graph under the strongID. Validate the - // image before using it. - if err := p.validateImageInGraph(imgs[i].id, imgs, i); err != nil { - continueReuse = false - logrus.Debugf("not using existing strongID: %v", err) - - // The strong ID existed in the graph but didn't - // validate successfully. We can't use the strong ID - // because it didn't validate successfully. Treat the - // graph like a hash table with probing... compute - // SHA256(id) until we find an ID that either doesn't - // already exist in the graph, or has existing content - // that validates successfully. 
- for { - if err := p.tryNextID(imgs, i, idMap); err != nil { - logrus.Debug(err.Error()) - } else { - break - } - } - } - continue - } - - if continueReuse { - compatibilityID := imgs[i].compatibilityID - if err := p.validateImageInGraph(compatibilityID, imgs, i); err != nil { - logrus.Debugf("stopping ID reuse: %v", err) - continueReuse = false - } else { - // The compatibility ID exists in the graph and was - // validated. Use it. - imgs[i].id = compatibilityID - } - } - } - - // fix up the parents of the images - for i := 0; i < len(imgs); i++ { - if i == len(imgs)-1 { // Base layer - imgs[i].parent = "" - } else { - imgs[i].parent = imgs[i+1].id - } - } -} - -// validateImageInGraph checks that an image in the graph has the expected -// strongID. id is the entry in the graph to check, imgs is the slice of -// images being processed (for access to the parent), and i is the index -// into this slice which the graph entry should be checked against. -func (p *v2Puller) validateImageInGraph(id string, imgs []contentAddressableDescriptor, i int) error { - img, err := p.graph.Get(id) - if err != nil { - return fmt.Errorf("missing: %v", err) - } - if runtime.GOOS == "windows" { - // TODO - (swernli:11-16-2015) Skipping content addressable IDs on - // Windows as a hack for TP4 compat. The correct fix is to ensure that - // Windows layers do not have anything in them that takes a dependency - // on the ID of the layer in the management client. This will be fixed - // in Windows post-TP4. 
- return nil - } - layerID, err := p.graph.getLayerDigest(id) - if err != nil { - return fmt.Errorf("digest: %v", err) - } - var parentID digest.Digest - if i != len(imgs)-1 { - if img.Parent != imgs[i+1].id { // comparing that graph points to validated ID - return fmt.Errorf("parent: %v %v", img.Parent, imgs[i+1].id) - } - parentID = imgs[i+1].strongID - } else if img.Parent != "" { - return fmt.Errorf("unexpected parent: %v", img.Parent) - } - - v1Config, err := p.graph.getV1CompatibilityConfig(img.ID) - if err != nil { - return fmt.Errorf("v1Compatibility: %v %v", img.ID, err) - } - - json, err := image.MakeImageConfig(v1Config, layerID, parentID) - if err != nil { - return fmt.Errorf("make config: %v", err) - } - - if dgst, err := image.StrongID(json); err == nil && dgst == imgs[i].strongID { - logrus.Debugf("Validated %v as %v", dgst, id) - } else { - return fmt.Errorf("digest mismatch: %v %v, error: %v", dgst, imgs[i].strongID, err) - } - - // All clear - return nil -} - -func (p *v2Puller) tryNextID(imgs []contentAddressableDescriptor, i int, idMap map[string]struct{}) error { - nextID, _ := digest.FromBytes([]byte(imgs[i].id)) - imgs[i].id = nextID.Hex() - - if _, exists := idMap[imgs[i].id]; !exists { - p.graph.imageMutex.Lock(imgs[i].id) - defer p.graph.imageMutex.Unlock(imgs[i].id) - } - - if p.graph.Exists(imgs[i].id) { - if err := p.validateImageInGraph(imgs[i].id, imgs, i); err != nil { - return fmt.Errorf("not using existing strongID permutation %s: %v", imgs[i].id, err) - } - } - return nil -} diff --git a/graph/pull_v2_test.go b/graph/pull_v2_test.go deleted file mode 100644 index f7ec1e28c6..0000000000 --- a/graph/pull_v2_test.go +++ /dev/null @@ -1,195 +0,0 @@ -package graph - -import ( - "encoding/json" - "io/ioutil" - "reflect" - "strings" - "testing" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" -) - -// TestFixManifestLayers checks that fixManifestLayers removes a duplicate -// layer, and that 
it makes no changes to the manifest when called a second -// time, after the duplicate is removed. -func TestFixManifestLayers(t *testing.T) { - duplicateLayerManifest := schema1.Manifest{ - FSLayers: []schema1.FSLayer{ - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, - }, - History: []schema1.History{ - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain 
--untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, - }, - } - - duplicateLayerManifestExpectedOutput := schema1.Manifest{ - FSLayers: []schema1.FSLayer{ - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, - }, - History: []schema1.History{ - {V1Compatibility: 
"{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: 
"{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" 
./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, - }, - } - - if err := fixManifestLayers(&duplicateLayerManifest); err != nil { - t.Fatalf("unexpected error from fixManifestLayers: %v", err) - } - - if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { - t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest") - } - - // Run fixManifestLayers again and confirm that it doesn't change the - // manifest (which no longer has duplicate layers). - if err := fixManifestLayers(&duplicateLayerManifest); err != nil { - t.Fatalf("unexpected error from fixManifestLayers: %v", err) - } - - if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { - t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)") - } -} - -// TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails -// if the base layer configuration specifies a parent. 
-func TestFixManifestLayersBaseLayerParent(t *testing.T) { - duplicateLayerManifest := schema1.Manifest{ - FSLayers: []schema1.FSLayer{ - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, - }, - History: []schema1.History{ - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X 
main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, - }, - } - - if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID in the base layer of the image.") { - t.Fatalf("expected an invalid parent ID error from fixManifestLayers") - } -} - -// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails -// if an image configuration specifies a parent that doesn't directly follow -// that (deduplicated) image in the image history. 
-func TestFixManifestLayersBadParent(t *testing.T) { - duplicateLayerManifest := schema1.Manifest{ - FSLayers: []schema1.FSLayer{ - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, - }, - History: []schema1.History{ - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain 
--untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, - }, - } - - if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") { - t.Fatalf("expected an invalid parent ID error from fixManifestLayers") - } -} - -// TestValidateManifest verifies the validateManifest function -func TestValidateManifest(t *testing.T) { - expectedDigest := "sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd" - expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") - - // Good manifest - - goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest") - if err != nil { - t.Fatal("error reading fixture:", err) - } - - var goodSignedManifest schema1.SignedManifest - err = json.Unmarshal(goodManifestBytes, &goodSignedManifest) - if err != nil { - t.Fatal("error unmarshaling manifest:", err) - } - - verifiedManifest, err := 
verifyManifest(&goodSignedManifest, expectedDigest) - if err != nil { - t.Fatal("validateManifest failed:", err) - } - - if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { - t.Fatal("unexpected FSLayer in good manifest") - } - - // "Extra data" manifest - - extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest") - if err != nil { - t.Fatal("error reading fixture:", err) - } - - var extraDataSignedManifest schema1.SignedManifest - err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest) - if err != nil { - t.Fatal("error unmarshaling manifest:", err) - } - - verifiedManifest, err = verifyManifest(&extraDataSignedManifest, expectedDigest) - if err != nil { - t.Fatal("validateManifest failed:", err) - } - - if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { - t.Fatal("unexpected FSLayer in extra data manifest") - } - - // Bad manifest - - badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest") - if err != nil { - t.Fatal("error reading fixture:", err) - } - - var badSignedManifest schema1.SignedManifest - err = json.Unmarshal(badManifestBytes, &badSignedManifest) - if err != nil { - t.Fatal("error unmarshaling manifest:", err) - } - - verifiedManifest, err = verifyManifest(&badSignedManifest, expectedDigest) - if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") { - t.Fatal("expected validateManifest to fail with digest error") - } - - // Manifest with no signature - - expectedWholeFileDigest := "7ec3615a120efcdfc270e9c7ea4183330775a3e52a09e2efb194b9a7c18e5ff7" - - noSignatureManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/no_signature_manifest") - if err != nil { - t.Fatal("error reading fixture:", err) - } - - var noSignatureSignedManifest schema1.SignedManifest - noSignatureSignedManifest.Raw = noSignatureManifestBytes - err = json.Unmarshal(noSignatureManifestBytes, &noSignatureSignedManifest.Manifest) - if 
err != nil { - t.Fatal("error unmarshaling manifest:", err) - } - - verifiedManifest, err = verifyManifest(&noSignatureSignedManifest, expectedWholeFileDigest) - if err != nil { - t.Fatal("validateManifest failed:", err) - } - - if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { - t.Fatal("unexpected FSLayer in no-signature manifest") - } -} diff --git a/graph/push.go b/graph/push.go deleted file mode 100644 index 750eec50e0..0000000000 --- a/graph/push.go +++ /dev/null @@ -1,126 +0,0 @@ -package graph - -import ( - "fmt" - "io" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/registry" -) - -// ImagePushConfig stores push configuration. -type ImagePushConfig struct { - // MetaHeaders store HTTP headers with metadata about the image - // (DockerHeaders with prefix X-Meta- in the request). - MetaHeaders map[string][]string - // AuthConfig holds authentication credentials for authenticating with - // the registry. - AuthConfig *cliconfig.AuthConfig - // Tag is the specific variant of the image to be pushed. - // If no tag is provided, all tags will be pushed. - Tag string - // OutStream is the output writer for showing the status of the push - // operation. - OutStream io.Writer -} - -// pusher is an interface that abstracts pushing for different API versions. -type pusher interface { - // Push tries to push the image configured at the creation of Pusher. - // Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint. - // - // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. - Push() (fallback bool, err error) -} - -// newPusher creates a new Pusher interface that will push to either a v1 or v2 -// registry. 
The endpoint argument contains a Version field that determines -// whether a v1 or v2 pusher will be created. The other parameters are passed -// through to the underlying pusher implementation for use during the actual -// push operation. -func (s *TagStore) newPusher(endpoint registry.APIEndpoint, localRepo repository, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig, sf *streamformatter.StreamFormatter) (pusher, error) { - switch endpoint.Version { - case registry.APIVersion2: - return &v2Pusher{ - TagStore: s, - endpoint: endpoint, - localRepo: localRepo, - repoInfo: repoInfo, - config: imagePushConfig, - sf: sf, - layersPushed: make(map[digest.Digest]bool), - }, nil - case registry.APIVersion1: - return &v1Pusher{ - TagStore: s, - endpoint: endpoint, - localRepo: localRepo, - repoInfo: repoInfo, - config: imagePushConfig, - sf: sf, - }, nil - } - return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) -} - -// Push initiates a push operation on the repository named localName. -func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) error { - // FIXME: Allow to interrupt current push when new push of same image is done. 
- - var sf = streamformatter.NewJSONStreamFormatter() - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := s.registryService.ResolveRepository(localName) - if err != nil { - return err - } - - endpoints, err := s.registryService.LookupPushEndpoints(repoInfo.CanonicalName) - if err != nil { - return err - } - - reposLen := 1 - if imagePushConfig.Tag == "" { - reposLen = len(s.Repositories[repoInfo.LocalName]) - } - - imagePushConfig.OutStream.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen)) - - // If it fails, try to get the repository - localRepo, exists := s.Repositories[repoInfo.LocalName] - if !exists { - return fmt.Errorf("Repository does not exist: %s", repoInfo.LocalName) - } - - var lastErr error - for _, endpoint := range endpoints { - logrus.Debugf("Trying to push %s to %s %s", repoInfo.CanonicalName, endpoint.URL, endpoint.Version) - - pusher, err := s.newPusher(endpoint, localRepo, repoInfo, imagePushConfig, sf) - if err != nil { - lastErr = err - continue - } - if fallback, err := pusher.Push(); err != nil { - if fallback { - lastErr = err - continue - } - logrus.Debugf("Not continuing with error: %v", err) - return err - - } - - s.eventsService.Log("push", repoInfo.LocalName, "") - return nil - } - - if lastErr == nil { - lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.CanonicalName) - } - return lastErr -} diff --git a/graph/push_v1.go b/graph/push_v1.go deleted file mode 100644 index 01ad73ed20..0000000000 --- a/graph/push_v1.go +++ /dev/null @@ -1,354 +0,0 @@ -package graph - -import ( - "fmt" - "io" - "os" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progressreader" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/stringid" - 
"github.com/docker/docker/registry" - "github.com/docker/docker/utils" -) - -type v1Pusher struct { - *TagStore - endpoint registry.APIEndpoint - localRepo repository - repoInfo *registry.RepositoryInfo - config *ImagePushConfig - sf *streamformatter.StreamFormatter - session *registry.Session - - out io.Writer -} - -func (p *v1Pusher) Push() (fallback bool, err error) { - tlsConfig, err := p.registryService.TLSConfig(p.repoInfo.Index.Name) - if err != nil { - return false, err - } - // Adds Docker-specific headers as well as user-specified headers (metaHeaders) - tr := transport.NewTransport( - // TODO(tiborvass): was NoTimeout - registry.NewTransport(tlsConfig), - registry.DockerHeaders(p.config.MetaHeaders)..., - ) - client := registry.HTTPClient(tr) - v1Endpoint, err := p.endpoint.ToV1Endpoint(p.config.MetaHeaders) - if err != nil { - logrus.Debugf("Could not get v1 endpoint: %v", err) - return true, err - } - p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) - if err != nil { - // TODO(dmcgowan): Check if should fallback - return true, err - } - if err := p.pushRepository(p.config.Tag); err != nil { - // TODO(dmcgowan): Check if should fallback - return false, err - } - return false, nil -} - -// Retrieve the all the images to be uploaded in the correct order -func (p *v1Pusher) getImageList(requestedTag string) ([]string, map[string][]string, error) { - var ( - imageList []string - imagesSeen = make(map[string]bool) - tagsByImage = make(map[string][]string) - ) - - for tag, id := range p.localRepo { - if requestedTag != "" && requestedTag != tag { - // Include only the requested tag. - continue - } - - if utils.DigestReference(tag) { - // Ignore digest references. 
- continue - } - - var imageListForThisTag []string - - tagsByImage[id] = append(tagsByImage[id], tag) - - for img, err := p.graph.Get(id); img != nil; img, err = p.graph.GetParent(img) { - if err != nil { - return nil, nil, err - } - - if imagesSeen[img.ID] { - // This image is already on the list, we can ignore it and all its parents - break - } - - imagesSeen[img.ID] = true - imageListForThisTag = append(imageListForThisTag, img.ID) - } - - // reverse the image list for this tag (so the "most"-parent image is first) - for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 { - imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i] - } - - // append to main image list - imageList = append(imageList, imageListForThisTag...) - } - if len(imageList) == 0 { - return nil, nil, fmt.Errorf("No images found for the requested repository / tag") - } - logrus.Debugf("Image list: %v", imageList) - logrus.Debugf("Tags by image: %v", tagsByImage) - - return imageList, tagsByImage, nil -} - -// createImageIndex returns an index of an image's layer IDs and tags. 
-func (s *TagStore) createImageIndex(images []string, tags map[string][]string) []*registry.ImgData { - var imageIndex []*registry.ImgData - for _, id := range images { - if tags, hasTags := tags[id]; hasTags { - // If an image has tags you must add an entry in the image index - // for each tag - for _, tag := range tags { - imageIndex = append(imageIndex, ®istry.ImgData{ - ID: id, - Tag: tag, - }) - } - continue - } - // If the image does not have a tag it still needs to be sent to the - // registry with an empty tag so that it is associated with the repository - imageIndex = append(imageIndex, ®istry.ImgData{ - ID: id, - Tag: "", - }) - } - return imageIndex -} - -type imagePushData struct { - id string - compatibilityID string - endpoint string -} - -// lookupImageOnEndpoint checks the specified endpoint to see if an image exists -// and if it is absent then it sends the image id to the channel to be pushed. -func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, images chan imagePushData, imagesToPush chan string) { - defer wg.Done() - for image := range images { - if err := p.session.LookupRemoteImage(image.compatibilityID, image.endpoint); err != nil { - logrus.Errorf("Error in LookupRemoteImage: %s", err) - imagesToPush <- image.id - continue - } - p.out.Write(p.sf.FormatStatus("", "Image %s already pushed, skipping", stringid.TruncateID(image.id))) - } -} - -func (p *v1Pusher) pushImageToEndpoint(endpoint string, imageIDs []string, tags map[string][]string, repo *registry.RepositoryData) error { - workerCount := len(imageIDs) - // start a maximum of 5 workers to check if images exist on the specified endpoint. 
- if workerCount > 5 { - workerCount = 5 - } - var ( - wg = &sync.WaitGroup{} - imageData = make(chan imagePushData, workerCount*2) - imagesToPush = make(chan string, workerCount*2) - pushes = make(chan map[string]struct{}, 1) - ) - for i := 0; i < workerCount; i++ { - wg.Add(1) - go p.lookupImageOnEndpoint(wg, imageData, imagesToPush) - } - // start a go routine that consumes the images to push - go func() { - shouldPush := make(map[string]struct{}) - for id := range imagesToPush { - shouldPush[id] = struct{}{} - } - pushes <- shouldPush - }() - for _, id := range imageIDs { - compatibilityID, err := p.getV1ID(id) - if err != nil { - return err - } - imageData <- imagePushData{ - id: id, - compatibilityID: compatibilityID, - endpoint: endpoint, - } - } - // close the channel to notify the workers that there will be no more images to check. - close(imageData) - wg.Wait() - close(imagesToPush) - // wait for all the images that require pushes to be collected into a consumable map. - shouldPush := <-pushes - // finish by pushing any images and tags to the endpoint. The order that the images are pushed - // is very important that is why we are still iterating over the ordered list of imageIDs. - for _, id := range imageIDs { - if _, push := shouldPush[id]; push { - if _, err := p.pushImage(id, endpoint); err != nil { - // FIXME: Continue on error? - return err - } - } - for _, tag := range tags[id] { - p.out.Write(p.sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(id), endpoint+"repositories/"+p.repoInfo.RemoteName+"/tags/"+tag)) - compatibilityID, err := p.getV1ID(id) - if err != nil { - return err - } - if err := p.session.PushRegistryTag(p.repoInfo.RemoteName, compatibilityID, tag, endpoint); err != nil { - return err - } - } - } - return nil -} - -// pushRepository pushes layers that do not already exist on the registry. 
-func (p *v1Pusher) pushRepository(tag string) error { - logrus.Debugf("Local repo: %s", p.localRepo) - p.out = ioutils.NewWriteFlusher(p.config.OutStream) - imgList, tags, err := p.getImageList(tag) - if err != nil { - return err - } - p.out.Write(p.sf.FormatStatus("", "Sending image list")) - - imageIndex := p.createImageIndex(imgList, tags) - logrus.Debugf("Preparing to push %s with the following images and tags", p.localRepo) - for _, data := range imageIndex { - logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) - - // convert IDs to compatibilityIDs, imageIndex only used in registry calls - data.ID, err = p.getV1ID(data.ID) - if err != nil { - return err - } - } - - if _, found := p.poolAdd("push", p.repoInfo.LocalName); found { - return fmt.Errorf("push or pull %s is already in progress", p.repoInfo.LocalName) - } - defer p.poolRemove("push", p.repoInfo.LocalName) - - // Register all the images in a repository with the registry - // If an image is not in this list it will not be associated with the repository - repoData, err := p.session.PushImageJSONIndex(p.repoInfo.RemoteName, imageIndex, false, nil) - if err != nil { - return err - } - nTag := 1 - if tag == "" { - nTag = len(p.localRepo) - } - p.out.Write(p.sf.FormatStatus("", "Pushing repository %s (%d tags)", p.repoInfo.CanonicalName, nTag)) - // push the repository to each of the endpoints only if it does not exist. 
- for _, endpoint := range repoData.Endpoints { - if err := p.pushImageToEndpoint(endpoint, imgList, tags, repoData); err != nil { - return err - } - } - _, err = p.session.PushImageJSONIndex(p.repoInfo.RemoteName, imageIndex, true, repoData.Endpoints) - return err -} - -func (p *v1Pusher) pushImage(imgID, ep string) (checksum string, err error) { - jsonRaw, err := p.getV1Config(imgID) - if err != nil { - return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err) - } - p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pushing", nil)) - - compatibilityID, err := p.getV1ID(imgID) - if err != nil { - return "", err - } - - // General rule is to use ID for graph accesses and compatibilityID for - // calls to session.registry() - imgData := ®istry.ImgData{ - ID: compatibilityID, - } - - // Send the json - if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { - if err == registry.ErrAlreadyExists { - p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Image already pushed, skipping", nil)) - return "", nil - } - return "", err - } - - layerData, err := p.graph.tempLayerArchive(imgID, p.sf, p.out) - if err != nil { - return "", fmt.Errorf("Failed to generate layer archive: %s", err) - } - defer os.RemoveAll(layerData.Name()) - - // Send the layer - logrus.Debugf("rendered layer for %s of [%d] size", imgID, layerData.Size) - - checksum, checksumPayload, err := p.session.PushImageLayerRegistry(imgData.ID, - progressreader.New(progressreader.Config{ - In: layerData, - Out: p.out, - Formatter: p.sf, - Size: layerData.Size, - NewLines: false, - ID: stringid.TruncateID(imgID), - Action: "Pushing", - }), ep, jsonRaw) - if err != nil { - return "", err - } - imgData.Checksum = checksum - imgData.ChecksumPayload = checksumPayload - // Send the checksum - if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil { - return "", err - } - - p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Image 
successfully pushed", nil)) - return imgData.Checksum, nil -} - -// getV1ID returns the compatibilityID for the ID in the graph. compatibilityID -// is read from from the v1Compatibility config file in the disk. -func (p *v1Pusher) getV1ID(id string) (string, error) { - jsonData, err := p.getV1Config(id) - if err != nil { - return "", err - } - img, err := image.NewImgJSON(jsonData) - if err != nil { - return "", err - } - return img.ID, nil -} - -// getV1Config returns v1Compatibility config for the image in the graph. If -// there is no v1Compatibility file on disk for the image -func (p *v1Pusher) getV1Config(id string) ([]byte, error) { - jsonData, err := p.graph.generateV1CompatibilityChain(id) - if err != nil { - return nil, err - } - return jsonData, nil -} diff --git a/graph/push_v2.go b/graph/push_v2.go deleted file mode 100644 index 1b9cb443f5..0000000000 --- a/graph/push_v2.go +++ /dev/null @@ -1,397 +0,0 @@ -package graph - -import ( - "bufio" - "compress/gzip" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/progressreader" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/registry" - "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" - "golang.org/x/net/context" -) - -const compressionBufSize = 32768 - -type v2Pusher struct { - *TagStore - endpoint registry.APIEndpoint - localRepo repository - repoInfo *registry.RepositoryInfo - config *ImagePushConfig - sf *streamformatter.StreamFormatter - repo distribution.Repository - - // layersPushed is the set of layers known to exist on the remote side. 
- // This avoids redundant queries when pushing multiple tags that - // involve the same layers. - layersPushed map[digest.Digest]bool -} - -func (p *v2Pusher) Push() (fallback bool, err error) { - p.repo, err = newV2Repository(p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") - if err != nil { - logrus.Debugf("Error getting v2 registry: %v", err) - return true, err - } - return false, p.pushV2Repository(p.config.Tag) -} - -func (p *v2Pusher) getImageTags(askedTag string) ([]string, error) { - logrus.Debugf("Checking %q against %#v", askedTag, p.localRepo) - if len(askedTag) > 0 { - if _, ok := p.localRepo[askedTag]; !ok || utils.DigestReference(askedTag) { - return nil, fmt.Errorf("Tag does not exist for %s", askedTag) - } - return []string{askedTag}, nil - } - var tags []string - for tag := range p.localRepo { - if !utils.DigestReference(tag) { - tags = append(tags, tag) - } - } - return tags, nil -} - -func (p *v2Pusher) pushV2Repository(tag string) error { - localName := p.repoInfo.LocalName - if _, found := p.poolAdd("push", localName); found { - return fmt.Errorf("push or pull %s is already in progress", localName) - } - defer p.poolRemove("push", localName) - - tags, err := p.getImageTags(tag) - if err != nil { - return fmt.Errorf("error getting tags for %s: %s", localName, err) - } - if len(tags) == 0 { - return fmt.Errorf("no tags to push for %s", localName) - } - - for _, tag := range tags { - if err := p.pushV2Tag(tag); err != nil { - return err - } - } - - return nil -} - -func (p *v2Pusher) pushV2Tag(tag string) error { - logrus.Debugf("Pushing repository: %s:%s", p.repo.Name(), tag) - - layerID, exists := p.localRepo[tag] - if !exists { - return fmt.Errorf("tag does not exist: %s", tag) - } - - layersSeen := make(map[string]bool) - - layer, err := p.graph.Get(layerID) - if err != nil { - return err - } - - m := &schema1.Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: p.repo.Name(), - Tag: 
tag, - Architecture: layer.Architecture, - FSLayers: []schema1.FSLayer{}, - History: []schema1.History{}, - } - - var metadata runconfig.Config - if layer != nil && layer.Config != nil { - metadata = *layer.Config - } - - out := p.config.OutStream - - for ; layer != nil; layer, err = p.graph.GetParent(layer) { - if err != nil { - return err - } - - // break early if layer has already been seen in this image, - // this prevents infinite loops on layers which loopback, this - // cannot be prevented since layer IDs are not merkle hashes - // TODO(dmcgowan): throw error if no valid use case is found - if layersSeen[layer.ID] { - break - } - - // Skip the base layer on Windows. This cannot be pushed. - if allowBaseParentImage && layer.Parent == "" { - break - } - - logrus.Debugf("Pushing layer: %s", layer.ID) - - if layer.Config != nil && metadata.Image != layer.ID { - if err := runconfig.Merge(&metadata, layer.Config); err != nil { - return err - } - } - - var exists bool - dgst, err := p.graph.getLayerDigestWithLock(layer.ID) - switch err { - case nil: - if p.layersPushed[dgst] { - exists = true - // break out of switch, it is already known that - // the push is not needed and therefore doing a - // stat is unnecessary - break - } - _, err := p.repo.Blobs(context.Background()).Stat(context.Background(), dgst) - switch err { - case nil: - exists = true - out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image already exists", nil)) - case distribution.ErrBlobUnknown: - // nop - default: - out.Write(p.sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil)) - return err - } - case errDigestNotSet: - // nop - case digest.ErrDigestInvalidFormat, digest.ErrDigestUnsupported: - return fmt.Errorf("error getting image checksum: %v", err) - } - - // if digest was empty or not saved, or if blob does not exist on the remote repository, - // then fetch it. 
- if !exists { - var pushDigest digest.Digest - if pushDigest, err = p.pushV2Image(p.repo.Blobs(context.Background()), layer); err != nil { - return err - } - if dgst == "" { - // Cache new checksum - if err := p.graph.setLayerDigestWithLock(layer.ID, pushDigest); err != nil { - return err - } - } - dgst = pushDigest - } - - // read v1Compatibility config, generate new if needed - jsonData, err := p.graph.generateV1CompatibilityChain(layer.ID) - if err != nil { - return err - } - - m.FSLayers = append(m.FSLayers, schema1.FSLayer{BlobSum: dgst}) - m.History = append(m.History, schema1.History{V1Compatibility: string(jsonData)}) - - layersSeen[layer.ID] = true - p.layersPushed[dgst] = true - } - - // Fix parent chain if necessary - if err = fixHistory(m); err != nil { - return err - } - - logrus.Infof("Signed manifest for %s:%s using daemon's key: %s", p.repo.Name(), tag, p.trustKey.KeyID()) - signed, err := schema1.Sign(m, p.trustKey) - if err != nil { - return err - } - - manifestDigest, manifestSize, err := digestFromManifest(signed, p.repo.Name()) - if err != nil { - return err - } - if manifestDigest != "" { - out.Write(p.sf.FormatStatus("", "%s: digest: %s size: %d", tag, manifestDigest, manifestSize)) - } - - manSvc, err := p.repo.Manifests(context.Background()) - if err != nil { - return err - } - return manSvc.Put(signed) -} - -// fixHistory makes sure that the manifest has parent IDs that are consistent -// with its image IDs. Because local image IDs are generated from the -// configuration and filesystem contents, but IDs in the manifest are preserved -// from the original pull, it's possible to have inconsistencies where parent -// IDs don't match up with the other IDs in the manifest. This happens in the -// case where an engine pulls images where are identical except the IDs from the -// manifest - the local ID will be the same, and one of the v1Compatibility -// files gets discarded. 
-func fixHistory(m *schema1.Manifest) error { - var lastID string - - for i := len(m.History) - 1; i >= 0; i-- { - var historyEntry map[string]*json.RawMessage - if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &historyEntry); err != nil { - return err - } - - idJSON, present := historyEntry["id"] - if !present || idJSON == nil { - return errors.New("missing id key in v1compatibility file") - } - var id string - if err := json.Unmarshal(*idJSON, &id); err != nil { - return err - } - - parentJSON, present := historyEntry["parent"] - - if i == len(m.History)-1 { - // The base layer must not reference a parent layer, - // otherwise the manifest is incomplete. There is an - // exception for Windows to handle base layers. - if !allowBaseParentImage && present && parentJSON != nil { - var parent string - if err := json.Unmarshal(*parentJSON, &parent); err != nil { - return err - } - if parent != "" { - logrus.Debugf("parent id mismatch detected; fixing. parent reference: %s", parent) - delete(historyEntry, "parent") - fixedHistory, err := json.Marshal(historyEntry) - if err != nil { - return err - } - m.History[i].V1Compatibility = string(fixedHistory) - } - } - } else { - // For all other layers, the parent ID should equal the - // ID of the next item in the history list. If it - // doesn't, fix it up (but preserve all other fields, - // possibly including fields that aren't known to this - // engine version). - if !present || parentJSON == nil { - return errors.New("missing parent key in v1compatibility file") - } - var parent string - if err := json.Unmarshal(*parentJSON, &parent); err != nil { - return err - } - if parent != lastID { - logrus.Debugf("parent id mismatch detected; fixing. 
parent reference: %s actual id: %s", parent, id) - historyEntry["parent"] = rawJSON(lastID) - fixedHistory, err := json.Marshal(historyEntry) - if err != nil { - return err - } - m.History[i].V1Compatibility = string(fixedHistory) - } - } - lastID = id - } - - return nil -} - -func rawJSON(value interface{}) *json.RawMessage { - jsonval, err := json.Marshal(value) - if err != nil { - return nil - } - return (*json.RawMessage)(&jsonval) -} - -func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (digest.Digest, error) { - out := p.config.OutStream - - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Preparing", nil)) - - image, err := p.graph.Get(img.ID) - if err != nil { - return "", err - } - arch, err := p.graph.tarLayer(image) - if err != nil { - return "", err - } - defer arch.Close() - - // Send the layer - layerUpload, err := bs.Create(context.Background()) - if err != nil { - return "", err - } - defer layerUpload.Close() - - reader := progressreader.New(progressreader.Config{ - In: ioutil.NopCloser(arch), // we'll take care of close here. - Out: out, - Formatter: p.sf, - - // TODO(stevvooe): This may cause a size reporting error. Try to get - // this from tar-split or elsewhere. The main issue here is that we - // don't want to buffer to disk *just* to calculate the size. - Size: img.Size, - - NewLines: false, - ID: stringid.TruncateID(img.ID), - Action: "Pushing", - }) - - digester := digest.Canonical.New() - // HACK: The MultiWriter doesn't write directly to layerUpload because - // we must make sure the ReadFrom is used, not Write. Using Write would - // send a PATCH request for every Write call. - pipeReader, pipeWriter := io.Pipe() - // Use a bufio.Writer to avoid excessive chunking in HTTP request. 
- bufWriter := bufio.NewWriterSize(io.MultiWriter(pipeWriter, digester.Hash()), compressionBufSize) - compressor := gzip.NewWriter(bufWriter) - - go func() { - _, err := io.Copy(compressor, reader) - if err == nil { - err = compressor.Close() - } - if err == nil { - err = bufWriter.Flush() - } - if err != nil { - pipeWriter.CloseWithError(err) - } else { - pipeWriter.Close() - } - }() - - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushing", nil)) - nn, err := layerUpload.ReadFrom(pipeReader) - pipeReader.Close() - if err != nil { - return "", err - } - - dgst := digester.Digest() - if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil { - return "", err - } - - logrus.Debugf("uploaded layer %s (%s), %d bytes", img.ID, dgst, nn) - out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushed", nil)) - - return dgst, nil -} diff --git a/graph/registry.go b/graph/registry.go deleted file mode 100644 index a9a4ad187e..0000000000 --- a/graph/registry.go +++ /dev/null @@ -1,116 +0,0 @@ -package graph - -import ( - "errors" - "net" - "net/http" - "net/url" - "time" - - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/registry" - "golang.org/x/net/context" -) - -type dumbCredentialStore struct { - auth *cliconfig.AuthConfig -} - -func (dcs dumbCredentialStore) Basic(*url.URL) (string, string) { - return dcs.auth.Username, dcs.auth.Password -} - -// newV2Repository returns a repository (v2 only). It creates a HTTP transport -// providing timeout settings and authentication support, and also verifies the -// remote API version. 
-func newV2Repository(repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *cliconfig.AuthConfig, actions ...string) (distribution.Repository, error) { - ctx := context.Background() - - repoName := repoInfo.CanonicalName - // If endpoint does not support CanonicalName, use the RemoteName instead - if endpoint.TrimHostname { - repoName = repoInfo.RemoteName - } - - // TODO(dmcgowan): Call close idle connections when complete, use keep alive - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: endpoint.TLSConfig, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - - modifiers := registry.DockerHeaders(metaHeaders) - authTransport := transport.NewTransport(base, modifiers...) - pingClient := &http.Client{ - Transport: authTransport, - Timeout: 15 * time.Second, - } - endpointStr := strings.TrimRight(endpoint.URL, "/") + "/v2/" - req, err := http.NewRequest("GET", endpointStr, nil) - if err != nil { - return nil, err - } - resp, err := pingClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - versions := auth.APIVersions(resp, endpoint.VersionHeader) - if endpoint.VersionHeader != "" && len(endpoint.Versions) > 0 { - var foundVersion bool - for _, version := range endpoint.Versions { - for _, pingVersion := range versions { - if version == pingVersion { - foundVersion = true - } - } - } - if !foundVersion { - return nil, errors.New("endpoint does not support v2 API") - } - } - - challengeManager := auth.NewSimpleChallengeManager() - if err := challengeManager.AddResponse(resp); err != nil { - return nil, err - } - - creds := dumbCredentialStore{auth: authConfig} - tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, actions...) 
- basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) - tr := transport.NewTransport(base, modifiers...) - - return client.NewRepository(ctx, repoName, endpoint.URL, tr) -} - -func digestFromManifest(m *schema1.SignedManifest, localName string) (digest.Digest, int, error) { - payload, err := m.Payload() - if err != nil { - // If this failed, the signatures section was corrupted - // or missing. Treat the entire manifest as the payload. - payload = m.Raw - } - manifestDigest, err := digest.FromBytes(payload) - if err != nil { - logrus.Infof("Could not compute manifest digest for %s:%s : %v", localName, m.Tag, err) - } - return manifestDigest, len(payload), nil -} diff --git a/graph/service.go b/graph/service.go deleted file mode 100644 index 11fe926655..0000000000 --- a/graph/service.go +++ /dev/null @@ -1,89 +0,0 @@ -package graph - -import ( - "fmt" - "io" - "runtime" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/utils" -) - -// Lookup looks up an image by name in a TagStore and returns it as an -// ImageInspect structure. 
-func (s *TagStore) Lookup(name string) (*types.ImageInspect, error) { - image, err := s.LookupImage(name) - if err != nil || image == nil { - return nil, fmt.Errorf("No such image: %s", name) - } - - var repoTags = make([]string, 0) - var repoDigests = make([]string, 0) - - s.Lock() - for repoName, repository := range s.Repositories { - for ref, id := range repository { - if id == image.ID { - imgRef := utils.ImageReference(repoName, ref) - if utils.DigestReference(ref) { - repoDigests = append(repoDigests, imgRef) - } else { - repoTags = append(repoTags, imgRef) - } - } - } - } - s.Unlock() - - imageInspect := &types.ImageInspect{ - ID: image.ID, - RepoTags: repoTags, - RepoDigests: repoDigests, - Parent: image.Parent, - Comment: image.Comment, - Created: image.Created.Format(time.RFC3339Nano), - Container: image.Container, - ContainerConfig: &image.ContainerConfig, - DockerVersion: image.DockerVersion, - Author: image.Author, - Config: image.Config, - Architecture: image.Architecture, - Os: image.OS, - Size: image.Size, - VirtualSize: s.graph.getParentsSize(image) + image.Size, - } - - imageInspect.GraphDriver.Name = s.graph.driver.String() - - graphDriverData, err := s.graph.driver.GetMetadata(image.ID) - if err != nil { - return nil, err - } - imageInspect.GraphDriver.Data = graphDriverData - return imageInspect, nil -} - -// imageTarLayer return the tarLayer of the image -func (s *TagStore) imageTarLayer(name string, dest io.Writer) error { - if image, err := s.LookupImage(name); err == nil && image != nil { - // On Windows, the base layer cannot be exported - if runtime.GOOS != "windows" || image.Parent != "" { - - fs, err := s.graph.tarLayer(image) - if err != nil { - return err - } - defer fs.Close() - - written, err := io.Copy(dest, fs) - if err != nil { - return err - } - logrus.Debugf("rendered layer for %s of [%d] size", image.ID, written) - } - return nil - } - return fmt.Errorf("No such image: %s", name) -} diff --git a/graph/tags.go b/graph/tags.go 
deleted file mode 100644 index 8c6faf0b53..0000000000 --- a/graph/tags.go +++ /dev/null @@ -1,431 +0,0 @@ -package graph - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "sync" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/daemon/events" - "github.com/docker/docker/graph/tags" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/broadcaster" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/registry" - "github.com/docker/docker/utils" - "github.com/docker/libtrust" -) - -// ErrNameIsNotExist returned when there is no image with requested name. -var ErrNameIsNotExist = errors.New("image with specified name does not exist") - -// TagStore manages repositories. It encompasses the Graph used for versioned -// storage, as well as various services involved in pushing and pulling -// repositories. -type TagStore struct { - path string - graph *Graph - // Repositories is a map of repositories, indexed by name. - Repositories map[string]repository - trustKey libtrust.PrivateKey - sync.Mutex - // FIXME: move push/pull-related fields - // to a helper type - pullingPool map[string]*broadcaster.Buffered - pushingPool map[string]*broadcaster.Buffered - registryService *registry.Service - eventsService *events.Events -} - -// repository maps tags to image IDs. -type repository map[string]string - -// TagStoreConfig provides parameters for a new TagStore. -type TagStoreConfig struct { - // Graph is the versioned image store - Graph *Graph - // Key is the private key to use for signing manifests. - Key libtrust.PrivateKey - // Registry is the registry service to use for TLS configuration and - // endpoint lookup. - Registry *registry.Service - // Events is the events service to use for logging. 
- Events *events.Events -} - -// NewTagStore creates a new TagStore at specified path, using the parameters -// and services provided in cfg. -func NewTagStore(path string, cfg *TagStoreConfig) (*TagStore, error) { - abspath, err := filepath.Abs(path) - if err != nil { - return nil, err - } - - store := &TagStore{ - path: abspath, - graph: cfg.Graph, - trustKey: cfg.Key, - Repositories: make(map[string]repository), - pullingPool: make(map[string]*broadcaster.Buffered), - pushingPool: make(map[string]*broadcaster.Buffered), - registryService: cfg.Registry, - eventsService: cfg.Events, - } - // Load the json file if it exists, otherwise create it. - if err := store.reload(); os.IsNotExist(err) { - if err := store.save(); err != nil { - return nil, err - } - } else if err != nil { - return nil, err - } - return store, nil -} - -func (store *TagStore) save() error { - // Store the json ball - jsonData, err := json.Marshal(store) - if err != nil { - return err - } - if err := ioutil.WriteFile(store.path, jsonData, 0600); err != nil { - return err - } - return nil -} - -func (store *TagStore) reload() error { - f, err := os.Open(store.path) - if err != nil { - return err - } - defer f.Close() - if err := json.NewDecoder(f).Decode(&store); err != nil { - return err - } - return nil -} - -// LookupImage returns pointer to an Image struct corresponding to the given -// name. The name can include an optional tag; otherwise the default tag will -// be used. -func (store *TagStore) LookupImage(name string) (*image.Image, error) { - repoName, ref := parsers.ParseRepositoryTag(name) - if ref == "" { - ref = tags.DefaultTag - } - var ( - err error - img *image.Image - ) - - img, err = store.getImage(repoName, ref) - if err != nil { - return nil, err - } - - if img != nil { - return img, nil - } - - // name must be an image ID. 
- store.Lock() - defer store.Unlock() - if img, err = store.graph.Get(name); err != nil { - return nil, err - } - - return img, nil -} - -// GetID returns ID for image name. -func (store *TagStore) GetID(name string) (string, error) { - repoName, ref := parsers.ParseRepositoryTag(name) - if ref == "" { - ref = tags.DefaultTag - } - store.Lock() - defer store.Unlock() - repoName = registry.NormalizeLocalName(repoName) - repo, ok := store.Repositories[repoName] - if !ok { - return "", ErrNameIsNotExist - } - id, ok := repo[ref] - if !ok { - return "", ErrNameIsNotExist - } - return id, nil -} - -// ByID returns a reverse-lookup table of all the names which refer to each -// image - e.g. {"43b5f19b10584": {"base:latest", "base:v1"}} -func (store *TagStore) ByID() map[string][]string { - store.Lock() - defer store.Unlock() - byID := make(map[string][]string) - for repoName, repository := range store.Repositories { - for tag, id := range repository { - name := utils.ImageReference(repoName, tag) - if _, exists := byID[id]; !exists { - byID[id] = []string{name} - } else { - byID[id] = append(byID[id], name) - sort.Strings(byID[id]) - } - } - } - return byID -} - -// HasReferences returns whether or not the given image is referenced in one or -// more repositories. -func (store *TagStore) HasReferences(img *image.Image) bool { - return len(store.ByID()[img.ID]) > 0 -} - -// Delete deletes a repository or a specific tag. If ref is empty, the entire -// repository named repoName will be deleted; otherwise only the tag named by -// ref will be deleted. -func (store *TagStore) Delete(repoName, ref string) (bool, error) { - store.Lock() - defer store.Unlock() - deleted := false - if err := store.reload(); err != nil { - return false, err - } - - if ref == "" { - // Delete the whole repository. 
- delete(store.Repositories, repoName) - return true, store.save() - } - - repoRefs, exists := store.Repositories[repoName] - - if !exists { - return false, fmt.Errorf("No such repository: %s", repoName) - } - - if _, exists := repoRefs[ref]; exists { - delete(repoRefs, ref) - if len(repoRefs) == 0 { - delete(store.Repositories, repoName) - } - deleted = true - } - - return deleted, store.save() -} - -// Tag creates a tag in the repository reponame, pointing to the image named -// imageName. If force is true, an existing tag with the same name may be -// overwritten. -func (store *TagStore) Tag(repoName, tag, imageName string, force bool) error { - return store.setLoad(repoName, tag, imageName, force, nil) -} - -// setLoad stores the image to the store. -// If the imageName is already in the repo then a '-f' flag should be used to replace existing image. -func (store *TagStore) setLoad(repoName, tag, imageName string, force bool, out io.Writer) error { - img, err := store.LookupImage(imageName) - store.Lock() - defer store.Unlock() - if err != nil { - return err - } - if tag == "" { - tag = tags.DefaultTag - } - if err := validateRepoName(repoName); err != nil { - return err - } - if err := tags.ValidateTagName(tag); err != nil { - return err - } - if err := store.reload(); err != nil { - return err - } - var repo repository - repoName = registry.NormalizeLocalName(repoName) - if r, exists := store.Repositories[repoName]; exists { - repo = r - if old, exists := store.Repositories[repoName][tag]; exists { - - if !force { - return fmt.Errorf("Conflict: Tag %s:%s is already set to image %s, if you want to replace it, please use -f option", repoName, tag, old[:12]) - } - - if old != img.ID && out != nil { - - fmt.Fprintf(out, "The image %s:%s already exists, renaming the old one with ID %s to empty string\n", repoName, tag, old[:12]) - - } - } - } else { - repo = make(map[string]string) - store.Repositories[repoName] = repo - } - repo[tag] = img.ID - return 
store.save() -} - -// setDigest creates a digest reference to an image ID. -func (store *TagStore) setDigest(repoName, digest, imageName string) error { - img, err := store.LookupImage(imageName) - if err != nil { - return err - } - - if err := validateRepoName(repoName); err != nil { - return err - } - - if err := validateDigest(digest); err != nil { - return err - } - - store.Lock() - defer store.Unlock() - if err := store.reload(); err != nil { - return err - } - - repoName = registry.NormalizeLocalName(repoName) - repoRefs, exists := store.Repositories[repoName] - if !exists { - repoRefs = repository{} - store.Repositories[repoName] = repoRefs - } else if oldID, exists := repoRefs[digest]; exists && oldID != img.ID { - return fmt.Errorf("Conflict: Digest %s is already set to image %s", digest, oldID) - } - - repoRefs[digest] = img.ID - return store.save() -} - -// get returns the repository tag/image map for a given repository. -func (store *TagStore) get(repoName string) (repository, error) { - store.Lock() - defer store.Unlock() - if err := store.reload(); err != nil { - return nil, err - } - repoName = registry.NormalizeLocalName(repoName) - if r, exists := store.Repositories[repoName]; exists { - return r, nil - } - return nil, nil -} - -// getImage returns a pointer to an Image structure describing the image -// referred to by refOrID inside repository repoName. 
-func (store *TagStore) getImage(repoName, refOrID string) (*image.Image, error) { - repo, err := store.get(repoName) - - if err != nil { - return nil, err - } - if repo == nil { - return nil, nil - } - - store.Lock() - defer store.Unlock() - if imgID, exists := repo[refOrID]; exists { - return store.graph.Get(imgID) - } - - // If no matching tag is found, search through images for a matching image id - // iff it looks like a short ID or would look like a short ID - if stringid.IsShortID(stringid.TruncateID(refOrID)) { - for _, revision := range repo { - if strings.HasPrefix(revision, refOrID) { - return store.graph.Get(revision) - } - } - } - - return nil, nil -} - -// validateRepoName validates the name of a repository. -func validateRepoName(name string) error { - if name == "" { - return fmt.Errorf("Repository name can't be empty") - } - if name == "scratch" { - return fmt.Errorf("'scratch' is a reserved name") - } - return nil -} - -func validateDigest(dgst string) error { - if dgst == "" { - return errors.New("digest can't be empty") - } - if _, err := digest.ParseDigest(dgst); err != nil { - return err - } - return nil -} - -// poolAdd checks if a push or pull is already running, and returns -// (broadcaster, true) if a running operation is found. Otherwise, it creates a -// new one and returns (broadcaster, false). 
-func (store *TagStore) poolAdd(kind, key string) (*broadcaster.Buffered, bool) { - store.Lock() - defer store.Unlock() - - if p, exists := store.pullingPool[key]; exists { - return p, true - } - if p, exists := store.pushingPool[key]; exists { - return p, true - } - - broadcaster := broadcaster.NewBuffered() - - switch kind { - case "pull": - store.pullingPool[key] = broadcaster - case "push": - store.pushingPool[key] = broadcaster - default: - panic("Unknown pool type") - } - - return broadcaster, false -} - -func (store *TagStore) poolRemoveWithError(kind, key string, broadcasterResult error) error { - store.Lock() - defer store.Unlock() - switch kind { - case "pull": - if broadcaster, exists := store.pullingPool[key]; exists { - broadcaster.CloseWithError(broadcasterResult) - delete(store.pullingPool, key) - } - case "push": - if broadcaster, exists := store.pushingPool[key]; exists { - broadcaster.CloseWithError(broadcasterResult) - delete(store.pushingPool, key) - } - default: - return fmt.Errorf("Unknown pool type") - } - return nil -} - -func (store *TagStore) poolRemove(kind, key string) error { - return store.poolRemoveWithError(kind, key, nil) -} diff --git a/graph/tags/tags.go b/graph/tags/tags.go deleted file mode 100644 index 4c9399b476..0000000000 --- a/graph/tags/tags.go +++ /dev/null @@ -1,36 +0,0 @@ -package tags - -import ( - "fmt" - "regexp" - - "github.com/docker/distribution/reference" -) - -// DefaultTag defines the default tag used when performing images related actions and no tag string is specified -const DefaultTag = "latest" - -var anchoredTagRegexp = regexp.MustCompile(`^` + reference.TagRegexp.String() + `$`) - -// ErrTagInvalidFormat is returned if tag is invalid. -type ErrTagInvalidFormat struct { - name string -} - -func (e ErrTagInvalidFormat) Error() string { - return fmt.Sprintf("Illegal tag name (%s): only [A-Za-z0-9_.-] are allowed ('.' 
and '-' are NOT allowed in the initial), minimum 1, maximum 128 in length", e.name) -} - -// ValidateTagName validates the name of a tag. -// It returns an error if the given name is an emtpy string. -// If name is not valid, it returns ErrTagInvalidFormat -func ValidateTagName(name string) error { - if name == "" { - return fmt.Errorf("tag name can't be empty") - } - - if !anchoredTagRegexp.MatchString(name) { - return ErrTagInvalidFormat{name} - } - return nil -} diff --git a/graph/tags/tags_unit_test.go b/graph/tags/tags_unit_test.go deleted file mode 100644 index 374e0f053c..0000000000 --- a/graph/tags/tags_unit_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package tags - -import ( - "testing" -) - -func TestValidTagName(t *testing.T) { - validTags := []string{"9", "foo", "foo-test", "bar.baz.boo"} - for _, tag := range validTags { - if err := ValidateTagName(tag); err != nil { - t.Errorf("'%s' should've been a valid tag", tag) - } - } -} - -func TestInvalidTagName(t *testing.T) { - inValidTags := []string{"-9", ".foo", "-test", ".", "-"} - for _, tag := range inValidTags { - if err := ValidateTagName(tag); err == nil { - t.Errorf("'%s' should've been an invalid tag", tag) - } - } -} diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go deleted file mode 100644 index 0406f15443..0000000000 --- a/graph/tags_unit_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package graph - -import ( - "archive/tar" - "bytes" - "io" - "os" - "path" - "testing" - - "github.com/docker/docker/daemon/events" - "github.com/docker/docker/daemon/graphdriver" - _ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests - "github.com/docker/docker/graph/tags" - "github.com/docker/docker/image" - "github.com/docker/docker/utils" -) - -const ( - testOfficialImageName = "myapp" - testOfficialImageID = "1a2d3c4d4e5fa2d2a21acea242a5e2345d3aefc3e7dfa2a2a2a21a2a2ad2d234" - testOfficialImageIDShort = "1a2d3c4d4e5f" - testPrivateImageName = 
"127.0.0.1:8000/privateapp" - testPrivateImageID = "5bc255f8699e4ee89ac4469266c3d11515da88fdcbde45d7b069b636ff4efd81" - testPrivateImageIDShort = "5bc255f8699e" - testPrivateImageDigest = "sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb" - testPrivateImageTag = "sometag" -) - -func fakeTar() (io.Reader, error) { - uid := os.Getuid() - gid := os.Getgid() - - content := []byte("Hello world!\n") - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} { - hdr := new(tar.Header) - - // Leaving these fields blank requires root privileges - hdr.Uid = uid - hdr.Gid = gid - - hdr.Size = int64(len(content)) - hdr.Name = name - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - tw.Write([]byte(content)) - } - tw.Close() - return buf, nil -} - -func mkTestTagStore(root string, t *testing.T) *TagStore { - driver, err := graphdriver.New(root, nil, nil, nil) - if err != nil { - t.Fatal(err) - } - graph, err := NewGraph(root, driver, nil, nil) - if err != nil { - t.Fatal(err) - } - - tagCfg := &TagStoreConfig{ - Graph: graph, - Events: events.New(), - } - store, err := NewTagStore(path.Join(root, "tags"), tagCfg) - if err != nil { - t.Fatal(err) - } - officialArchive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - img := &image.Image{ID: testOfficialImageID} - if err := graph.Register(v1Descriptor{img}, officialArchive); err != nil { - t.Fatal(err) - } - if err := store.Tag(testOfficialImageName, "", testOfficialImageID, false); err != nil { - t.Fatal(err) - } - privateArchive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - img = &image.Image{ID: testPrivateImageID} - if err := graph.Register(v1Descriptor{img}, privateArchive); err != nil { - t.Fatal(err) - } - if err := store.Tag(testPrivateImageName, "", testPrivateImageID, false); err != nil { - t.Fatal(err) - } - if err := store.setDigest(testPrivateImageName, 
testPrivateImageDigest, testPrivateImageID); err != nil { - t.Fatal(err) - } - return store -} - -func TestLookupImage(t *testing.T) { - tmp, err := utils.TestDirectory("") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - store := mkTestTagStore(tmp, t) - defer store.graph.driver.Cleanup() - - officialLookups := []string{ - testOfficialImageID, - testOfficialImageIDShort, - testOfficialImageName + ":" + testOfficialImageID, - testOfficialImageName + ":" + testOfficialImageIDShort, - testOfficialImageName, - testOfficialImageName + ":" + tags.DefaultTag, - "docker.io/" + testOfficialImageName, - "docker.io/" + testOfficialImageName + ":" + tags.DefaultTag, - "index.docker.io/" + testOfficialImageName, - "index.docker.io/" + testOfficialImageName + ":" + tags.DefaultTag, - "library/" + testOfficialImageName, - "library/" + testOfficialImageName + ":" + tags.DefaultTag, - "docker.io/library/" + testOfficialImageName, - "docker.io/library/" + testOfficialImageName + ":" + tags.DefaultTag, - "index.docker.io/library/" + testOfficialImageName, - "index.docker.io/library/" + testOfficialImageName + ":" + tags.DefaultTag, - } - - privateLookups := []string{ - testPrivateImageID, - testPrivateImageIDShort, - testPrivateImageName + ":" + testPrivateImageID, - testPrivateImageName + ":" + testPrivateImageIDShort, - testPrivateImageName, - testPrivateImageName + ":" + tags.DefaultTag, - } - - invalidLookups := []string{ - testOfficialImageName + ":" + "fail", - "fail:fail", - } - - digestLookups := []string{ - testPrivateImageName + "@" + testPrivateImageDigest, - } - - for _, name := range officialLookups { - if img, err := store.LookupImage(name); err != nil { - t.Errorf("Error looking up %s: %s", name, err) - } else if img == nil { - t.Errorf("Expected 1 image, none found: %s", name) - } else if img.ID != testOfficialImageID { - t.Errorf("Expected ID '%s' found '%s'", testOfficialImageID, img.ID) - } - } - - for _, name := range privateLookups { - if img, 
err := store.LookupImage(name); err != nil { - t.Errorf("Error looking up %s: %s", name, err) - } else if img == nil { - t.Errorf("Expected 1 image, none found: %s", name) - } else if img.ID != testPrivateImageID { - t.Errorf("Expected ID '%s' found '%s'", testPrivateImageID, img.ID) - } - } - - for _, name := range invalidLookups { - if img, err := store.LookupImage(name); err == nil { - t.Errorf("Expected error, none found: %s", name) - } else if img != nil { - t.Errorf("Expected 0 image, 1 found: %s", name) - } - } - - for _, name := range digestLookups { - if img, err := store.LookupImage(name); err != nil { - t.Errorf("Error looking up %s: %s", name, err) - } else if img == nil { - t.Errorf("Expected 1 image, none found: %s", name) - } else if img.ID != testPrivateImageID { - t.Errorf("Expected ID '%s' found '%s'", testPrivateImageID, img.ID) - } - } -} - -func TestValidateDigest(t *testing.T) { - tests := []struct { - input string - expectError bool - }{ - {"", true}, - {"latest", true}, - {"sha256:b", false}, - {"tarsum+v1+sha256:bY852-_.+=", false}, - {"#$%#$^:$%^#$%", true}, - } - - for i, test := range tests { - err := validateDigest(test.input) - gotError := err != nil - if e, a := test.expectError, gotError; e != a { - t.Errorf("%d: with input %s, expected error=%t, got %t: %s", i, test.input, test.expectError, gotError, err) - } - } -}