
Merge pull request #20164 from tonistiigi/verify-tarsteam-on-creation

Verify layer tarstream
Tibor Vass, 9 years ago
parent
commit
c4756a0c36
4 changed files with 126 additions and 14 deletions
  1. layer/layer_test.go (+75, -8)
  2. layer/migration_test.go (+1, -1)
  3. layer/mount_test.go (+3, -3)
  4. layer/ro_layer.go (+47, -2)
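
This change makes (*roLayer).TarStream wrap the reassembled tar in a digest-verifying reader, so corrupted or tampered tar-split metadata is detected while the stream is read rather than silently producing a bad tar. A minimal sketch of how a caller observes this, assuming this repository's github.com/docker/docker/layer import path; exportLayer is a hypothetical helper, not part of the commit:

package layerexport

import (
	"io"

	"github.com/docker/docker/layer"
)

// exportLayer is a hypothetical helper showing where the new verification
// surfaces: TarStream() itself still succeeds, but reading the returned
// stream fails once the reassembled tar no longer matches the layer's DiffID.
func exportLayer(l layer.Layer, w io.Writer) error {
	ts, err := l.TarStream()
	if err != nil {
		return err
	}
	defer ts.Close()

	// With this change a digest mismatch is reported here as
	// "could not verify layer data for: <diffID> ..." instead of
	// the caller receiving corrupted layer content unnoticed.
	_, err = io.Copy(w, ts)
	return err
}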

+ 75 - 8
layer/layer_test.go

@@ -6,6 +6,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"strings"
 	"testing"
 
 	"github.com/docker/distribution/digest"
@@ -56,7 +57,7 @@ func newTestGraphDriver(t *testing.T) (graphdriver.Driver, func()) {
 	}
 }
 
-func newTestStore(t *testing.T) (Store, func()) {
+func newTestStore(t *testing.T) (Store, string, func()) {
 	td, err := ioutil.TempDir("", "layerstore-")
 	if err != nil {
 		t.Fatal(err)
@@ -72,7 +73,7 @@ func newTestStore(t *testing.T) (Store, func()) {
 		t.Fatal(err)
 	}
 
-	return ls, func() {
+	return ls, td, func() {
 		graphcleanup()
 		os.RemoveAll(td)
 	}
@@ -265,7 +266,7 @@ func assertLayerEqual(t *testing.T, l1, l2 Layer) {
 }
 
 func TestMountAndRegister(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644))
@@ -306,7 +307,7 @@ func TestMountAndRegister(t *testing.T) {
 }
 
 func TestLayerRelease(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644)))
@@ -351,7 +352,7 @@ func TestLayerRelease(t *testing.T) {
 }
 
 func TestStoreRestore(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644)))
@@ -472,7 +473,7 @@ func TestStoreRestore(t *testing.T) {
 }
 
 func TestTarStreamStability(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	files1 := []FileApplier{
@@ -668,7 +669,7 @@ func assertActivityCount(t *testing.T, l RWLayer, expected int) {
 }
 
 func TestRegisterExistingLayer(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	baseFiles := []FileApplier{
@@ -713,7 +714,7 @@ func graphDiffSize(ls Store, l Layer) (int64, error) {
 }
 
 func TestLayerSize(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	content1 := []byte("Base contents")
@@ -765,3 +766,69 @@ func TestLayerSize(t *testing.T) {
 		t.Fatalf("Unexpected size %d, expected %d", layer2Size, expected)
 	}
 }
+
+func TestTarStreamVerification(t *testing.T) {
+	ls, tmpdir, cleanup := newTestStore(t)
+	defer cleanup()
+
+	files1 := []FileApplier{
+		newTestFile("/foo", []byte("abc"), 0644),
+		newTestFile("/bar", []byte("def"), 0644),
+	}
+	files2 := []FileApplier{
+		newTestFile("/foo", []byte("abc"), 0644),
+		newTestFile("/bar", []byte("def"), 0600), // different perm
+	}
+
+	tar1, err := tarFromFiles(files1...)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tar2, err := tarFromFiles(files2...)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layer1, err := ls.Register(bytes.NewReader(tar1), "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	layer2, err := ls.Register(bytes.NewReader(tar2), "")
+	if err != nil {
+		t.Fatal(err)
+	}
+	id1 := digest.Digest(layer1.ChainID())
+	id2 := digest.Digest(layer2.ChainID())
+
+	// Replace tar data files
+	src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Hex(), "tar-split.json.gz"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := io.Copy(dst, src); err != nil {
+		t.Fatal(err)
+	}
+
+	src.Close()
+	dst.Close()
+
+	ts, err := layer2.TarStream()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = io.Copy(ioutil.Discard, ts)
+	if err == nil {
+		t.Fatal("expected data verification to fail")
+	}
+	if !strings.Contains(err.Error(), "could not verify layer data") {
+		t.Fatalf("wrong error returned from tarstream: %q", err)
+	}
+}

+ 1 - 1
layer/migration_test.go

@@ -268,7 +268,7 @@ func TestLayerMigrationNoTarsplit(t *testing.T) {
 }
 
 func TestMountMigration(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	baseFiles := []FileApplier{

+ 3 - 3
layer/mount_test.go

@@ -11,7 +11,7 @@ import (
 )
 
 func TestMountInit(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	basefile := newTestFile("testfile.txt", []byte("base data!"), 0644)
@@ -63,7 +63,7 @@ func TestMountInit(t *testing.T) {
 }
 
 func TestMountSize(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	content1 := []byte("Base contents")
@@ -105,7 +105,7 @@ func TestMountSize(t *testing.T) {
 }
 
 func TestMountChanges(t *testing.T) {
-	ls, cleanup := newTestStore(t)
+	ls, _, cleanup := newTestStore(t)
 	defer cleanup()
 
 	basefiles := []FileApplier{

+ 47 - 2
layer/ro_layer.go

@@ -1,6 +1,11 @@
 package layer
 
-import "io"
+import (
+	"fmt"
+	"io"
+
+	"github.com/docker/distribution/digest"
+)
 
 type roLayer struct {
 	chainID    ChainID
@@ -29,7 +34,11 @@ func (rl *roLayer) TarStream() (io.ReadCloser, error) {
 			pw.Close()
 		}
 	}()
-	return pr, nil
+	rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID))
+	if err != nil {
+		return nil, err
+	}
+	return rc, nil
 }
 
 func (rl *roLayer) ChainID() ChainID {
@@ -117,3 +126,39 @@ func storeLayer(tx MetadataTransaction, layer *roLayer) error {
 
 	return nil
 }
+
+func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) {
+	verifier, err := digest.NewDigestVerifier(dgst)
+	if err != nil {
+		return nil, err
+	}
+	return &verifiedReadCloser{
+		rc:       rc,
+		dgst:     dgst,
+		verifier: verifier,
+	}, nil
+}
+
+type verifiedReadCloser struct {
+	rc       io.ReadCloser
+	dgst     digest.Digest
+	verifier digest.Verifier
+}
+
+func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) {
+	n, err = vrc.rc.Read(p)
+	if n > 0 {
+		if n, err := vrc.verifier.Write(p[:n]); err != nil {
+			return n, err
+		}
+	}
+	if err == io.EOF {
+		if !vrc.verifier.Verified() {
+			err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. Re-pulling or rebuilding this image may resolve the issue", vrc.dgst)
+		}
+	}
+	return
+}
+func (vrc *verifiedReadCloser) Close() error {
+	return vrc.rc.Close()
+}