Merge pull request #20407 from tiborvass/backmerge-1.10.1

Backmerge 1.10.1 fixes to master
David Calavera 2016-02-17 13:45:43 -08:00
commit b26c21f704
8 changed files with 130 additions and 17 deletions

Changed file 1 of 8

@@ -760,7 +760,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
 	migrationStart := time.Now()
 	if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
-		return nil, err
+		logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
 	}
 	logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())

Changed file 2 of 8

@@ -6,6 +6,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"strings"
 	"testing"
 
 	"github.com/docker/distribution/digest"
@@ -56,7 +57,7 @@ func newTestGraphDriver(t *testing.T) (graphdriver.Driver, func()) {
 	}
 }
 
-func newTestStore(t *testing.T) (Store, func()) {
+func newTestStore(t *testing.T) (Store, string, func()) {
 	td, err := ioutil.TempDir("", "layerstore-")
 	if err != nil {
 		t.Fatal(err)
@@ -72,7 +73,7 @@ func newTestStore(t *testing.T) (Store, func()) {
 		t.Fatal(err)
 	}
 
-	return ls, func() {
+	return ls, td, func() {
 		graphcleanup()
 		os.RemoveAll(td)
 	}
@@ -265,7 +266,7 @@ func assertLayerEqual(t *testing.T, l1, l2 Layer) {
 }
 
 func TestMountAndRegister(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644))
@@ -306,7 +307,7 @@ func TestMountAndRegister(t *testing.T) {
 }
 
 func TestLayerRelease(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644)))
@@ -351,7 +352,7 @@ func TestLayerRelease(t *testing.T) {
 }
 
 func TestStoreRestore(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644)))
@@ -472,7 +473,7 @@ func TestStoreRestore(t *testing.T) {
 }
 
 func TestTarStreamStability(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	files1 := []FileApplier{
@@ -668,7 +669,7 @@ func assertActivityCount(t *testing.T, l RWLayer, expected int) {
 }
 
 func TestRegisterExistingLayer(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	baseFiles := []FileApplier{
@@ -702,3 +703,69 @@ func TestRegisterExistingLayer(t *testing.T) {
 	assertReferences(t, layer2a, layer2b)
 }
+
+func TestTarStreamVerification(t *testing.T) {
+	ls, tmpdir, cleanup := newTestStore(t)
+	defer cleanup()
+
+	files1 := []FileApplier{
+		newTestFile("/foo", []byte("abc"), 0644),
+		newTestFile("/bar", []byte("def"), 0644),
+	}
+	files2 := []FileApplier{
+		newTestFile("/foo", []byte("abc"), 0644),
+		newTestFile("/bar", []byte("def"), 0600), // different perm
+	}
+
+	tar1, err := tarFromFiles(files1...)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tar2, err := tarFromFiles(files2...)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layer1, err := ls.Register(bytes.NewReader(tar1), "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layer2, err := ls.Register(bytes.NewReader(tar2), "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	id1 := digest.Digest(layer1.ChainID())
+	id2 := digest.Digest(layer2.ChainID())
+
+	// Replace tar data files
+	src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Hex(), "tar-split.json.gz"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := io.Copy(dst, src); err != nil {
+		t.Fatal(err)
+	}
+	src.Close()
+	dst.Close()
+
+	ts, err := layer2.TarStream()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = io.Copy(ioutil.Discard, ts)
+	if err == nil {
+		t.Fatal("expected data verification to fail")
+	}
+	if !strings.Contains(err.Error(), "could not verify layer data") {
+		t.Fatalf("wrong error returned from tarstream: %q", err)
+	}
+}

Changed file 3 of 8

@@ -16,7 +16,7 @@ func graphDiffSize(ls Store, l Layer) (int64, error) {
 // Unix as Windows graph driver does not support Changes which is indirectly
 // invoked by calling DiffSize on the driver
 func TestLayerSize(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	content1 := []byte("Base contents")

Changed file 4 of 8

@@ -32,7 +32,7 @@ func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent
 	}
 
 	if !ls.driver.Exists(graphID) {
-		return errors.New("graph ID does not exist")
+		return fmt.Errorf("graph ID does not exist: %q", graphID)
 	}
 
 	var p *roLayer

Changed file 5 of 8

@@ -268,7 +268,7 @@ func TestLayerMigrationNoTarsplit(t *testing.T) {
 }
 
 func TestMountMigration(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	baseFiles := []FileApplier{

Changed file 6 of 8

@@ -11,7 +11,7 @@ import (
 )
 
 func TestMountInit(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	basefile := newTestFile("testfile.txt", []byte("base data!"), 0644)
@@ -63,7 +63,7 @@ func TestMountInit(t *testing.T) {
 }
 
 func TestMountSize(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	content1 := []byte("Base contents")
@@ -105,7 +105,7 @@ func TestMountSize(t *testing.T) {
 }
 
 func TestMountChanges(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	basefiles := []FileApplier{

Changed file 7 of 8

@@ -1,6 +1,11 @@
 package layer
 
-import "io"
+import (
+	"fmt"
+	"io"
+
+	"github.com/docker/distribution/digest"
+)
 
 type roLayer struct {
 	chainID ChainID
@@ -29,7 +34,11 @@ func (rl *roLayer) TarStream() (io.ReadCloser, error) {
 			pw.Close()
 		}
 	}()
-	return pr, nil
+	rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID))
+	if err != nil {
+		return nil, err
+	}
+	return rc, nil
 }
 
 func (rl *roLayer) ChainID() ChainID {
@@ -117,3 +126,39 @@ func storeLayer(tx MetadataTransaction, layer *roLayer) error {
 	return nil
 }
+
+func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) {
+	verifier, err := digest.NewDigestVerifier(dgst)
+	if err != nil {
+		return nil, err
+	}
+	return &verifiedReadCloser{
+		rc:       rc,
+		dgst:     dgst,
+		verifier: verifier,
+	}, nil
+}
+
+type verifiedReadCloser struct {
+	rc       io.ReadCloser
+	dgst     digest.Digest
+	verifier digest.Verifier
+}
+
+func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) {
+	n, err = vrc.rc.Read(p)
+	if n > 0 {
+		if n, err := vrc.verifier.Write(p[:n]); err != nil {
+			return n, err
+		}
+	}
+	if err == io.EOF {
+		if !vrc.verifier.Verified() {
+			err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. Re-pulling or rebuilding this image may resolve the issue", vrc.dgst)
+		}
+	}
+	return
+}
+
+func (vrc *verifiedReadCloser) Close() error {
+	return vrc.rc.Close()
+}
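
The verification added above relies on the digest package's Verifier: every byte read from the layer stream is also written to the verifier, and Verified() is only consulted once the stream reaches EOF, so a tampered tar-split file surfaces as a read error instead of silently producing wrong layer data. A minimal standalone sketch of that pattern follows; it is not part of this commit, and the sample data, the sha256 digest string, and the use of io.TeeReader are illustrative assumptions.

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/docker/distribution/digest"
)

func main() {
	// Stand-in for a layer tar stream and its expected diff ID (illustrative only).
	data := []byte("example layer bytes")
	dgst := digest.Digest(fmt.Sprintf("sha256:%x", sha256.Sum256(data)))

	// Same primitive verifiedReadCloser uses: a Verifier fed with the stream,
	// reporting at the end whether the bytes matched the expected digest.
	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		panic(err)
	}

	// Tee every byte read from the source into the verifier, then check it.
	if _, err := io.Copy(ioutil.Discard, io.TeeReader(bytes.NewReader(data), verifier)); err != nil {
		panic(err)
	}
	fmt.Println("verified:", verifier.Verified()) // true only if the data matched dgst
}

In the commit itself this check is folded into verifiedReadCloser.Read, so callers of TarStream see the failure as an error from the reader without any extra wiring.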

Changed file 8 of 8

@@ -282,7 +282,8 @@ func migrateContainers(root string, ls graphIDMounter, is image.Store, imageMapp
 		}
 
 		if err := ls.CreateRWLayerByGraphID(id, id, img.RootFS.ChainID()); err != nil {
-			return err
+			logrus.Errorf("migrate container error: %v", err)
+			continue
 		}
 
 		logrus.Infof("migrated container %s to point to %s", id, imageID)