
Merge pull request #16953 from aaronlehmann/master-security

Add 1.8.3 security fixes to master
Jess Frazelle 9 years ago
parent
commit
eebf88ffd3
39 changed files with 1270 additions and 885 deletions
  1. daemon/daemon.go (+0 -6)
  2. daemon/graphdriver/imagerestorer.go (+1 -1)
  3. daemon/graphdriver/windows/windows.go (+21 -1)
  4. graph/export.go (+10 -7)
  5. graph/fixtures/validate_manifest/bad_manifest (+38 -0)
  6. graph/fixtures/validate_manifest/extra_data_manifest (+46 -0)
  7. graph/fixtures/validate_manifest/good_manifest (+38 -0)
  8. graph/fixtures/validate_manifest/no_signature_manifest (+22 -0)
  9. graph/graph.go (+174 -38)
  10. graph/graph_test.go (+17 -7)
  11. graph/load.go (+1 -1)
  12. graph/pull.go (+0 -6)
  13. graph/pull_v1.go (+8 -3)
  14. graph/pull_v2.go (+375 -103)
  15. graph/pull_v2_test.go (+26 -0)
  16. graph/push_v1.go (+61 -13)
  17. graph/push_v2.go (+8 -7)
  18. graph/registry.go (+3 -2)
  19. graph/tags.go (+0 -5)
  20. graph/tags_unit_test.go (+2 -9)
  21. image/fixtures/post1.9/expected_computed_id (+1 -0)
  22. image/fixtures/post1.9/expected_config (+1 -0)
  23. image/fixtures/post1.9/layer_id (+1 -0)
  24. image/fixtures/post1.9/parent_id (+1 -0)
  25. image/fixtures/post1.9/v1compatibility (+1 -0)
  26. image/fixtures/pre1.9/expected_computed_id (+1 -0)
  27. image/fixtures/pre1.9/expected_config (+1 -0)
  28. image/fixtures/pre1.9/layer_id (+1 -0)
  29. image/fixtures/pre1.9/parent_id (+1 -0)
  30. image/fixtures/pre1.9/v1compatibility (+1 -0)
  31. image/image.go (+92 -2)
  32. image/image_test.go (+55 -0)
  33. integration-cli/docker_cli_pull_test.go (+255 -0)
  34. runconfig/config.go (+7 -4)
  35. trust/service.go (+0 -59)
  36. trust/trusts.go (+0 -201)
  37. vendor/src/github.com/docker/libtrust/trustgraph/graph.go (+0 -50)
  38. vendor/src/github.com/docker/libtrust/trustgraph/memory_graph.go (+0 -133)
  39. vendor/src/github.com/docker/libtrust/trustgraph/statement.go (+0 -227)

+ 0 - 6
daemon/daemon.go

@@ -50,7 +50,6 @@ import (
 	"github.com/docker/docker/pkg/truncindex"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
-	"github.com/docker/docker/trust"
 	volumedrivers "github.com/docker/docker/volume/drivers"
 	"github.com/docker/docker/volume/local"
 	"github.com/docker/docker/volume/store"
@@ -738,10 +737,6 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
 	if err := system.MkdirAll(trustDir, 0700); err != nil {
 		return nil, err
 	}
-	trustService, err := trust.NewStore(trustDir)
-	if err != nil {
-		return nil, fmt.Errorf("could not create trust store: %s", err)
-	}

 	eventsService := events.New()
 	logrus.Debug("Creating repository list")
@@ -750,7 +745,6 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
 		Key:      trustKey,
 		Registry: registryService,
 		Events:   eventsService,
-		Trust:    trustService,
 	}
 	repositories, err := graph.NewTagStore(filepath.Join(config.Root, "repositories-"+d.driver.String()), tagCfg)
 	if err != nil {

+ 1 - 1
daemon/graphdriver/imagerestorer.go

@@ -27,5 +27,5 @@ type Tagger interface {
 // functions without needing to import graph.
 type Recorder interface {
 	Exists(id string) bool
-	Register(img *image.Image, layerData io.Reader) error
+	Register(img image.Descriptor, layerData io.Reader) error
 }

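Note: the image.Descriptor interface that Register now accepts is not itself shown in this diff. Judging from the three implementations this commit adds (customImageDescriptor below, and v1Descriptor and contentAddressableDescriptor further down), it presumably looks roughly like this sketch; the real definition lives in the image package.

// Sketch only: inferred from the descriptor implementations added in this
// commit, each of which provides exactly these three methods.
package image

// Descriptor lets the graph register plain v1 images, Windows custom
// images, and content-addressable images from a v2 pull through one API.
type Descriptor interface {
	// ID returns the identifier the image will be stored under.
	ID() string
	// Parent returns the parent image ID, or "" for a base layer.
	Parent() string
	// MarshalConfig renders the image configuration as JSON.
	MarshalConfig() ([]byte, error)
}
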
+ 21 - 1
daemon/graphdriver/windows/windows.go

@@ -40,6 +40,26 @@ const (
 	filterDriver
 )

+// CustomImageDescriptor is an image descriptor for use by RestoreCustomImages
+type customImageDescriptor struct {
+	img *image.Image
+}
+
+// ID returns the image ID specified in the image structure.
+func (img customImageDescriptor) ID() string {
+	return img.img.ID
+}
+
+// Parent returns the parent ID - in this case, none
+func (img customImageDescriptor) Parent() string {
+	return ""
+}
+
+// MarshalConfig renders the image structure into JSON.
+func (img customImageDescriptor) MarshalConfig() ([]byte, error) {
+	return json.Marshal(img.img)
+}
+
 // Driver represents a windows graph driver.
 type Driver struct {
 	// info stores the shim driver information
@@ -426,7 +446,7 @@ func (d *Driver) RestoreCustomImages(tagger graphdriver.Tagger, recorder graphdr
 				Size:          imageData.Size,
 			}

-			if err := recorder.Register(img, nil); err != nil {
+			if err := recorder.Register(customImageDescriptor{img}, nil); err != nil {
 				return nil, err
 			}


+ 10 - 7
graph/export.go

@@ -112,6 +112,11 @@ func (s *TagStore) ImageExport(names []string, outStream io.Writer) error {

 func (s *TagStore) exportImage(name, tempdir string) error {
 	for n := name; n != ""; {
+		img, err := s.LookupImage(n)
+		if err != nil || img == nil {
+			return fmt.Errorf("No such image %s", n)
+		}
+
 		// temporary directory
 		tmpImageDir := filepath.Join(tempdir, n)
 		if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil {
@@ -128,19 +133,17 @@ func (s *TagStore) exportImage(name, tempdir string) error {
 			return err
 		}

-		// serialize json
-		json, err := os.Create(filepath.Join(tmpImageDir, "json"))
+		imageInspectRaw, err := json.Marshal(img)
 		if err != nil {
 			return err
 		}
-		img, err := s.LookupImage(n)
-		if err != nil || img == nil {
-			return fmt.Errorf("No such image %s", n)
-		}
-		imageInspectRaw, err := s.graph.RawJSON(img.ID)
+
+		// serialize json
+		json, err := os.Create(filepath.Join(tmpImageDir, "json"))
 		if err != nil {
 			return err
 		}
+
 		written, err := json.Write(imageInspectRaw)
 		if err != nil {
 			return err

+ 38 - 0
graph/fixtures/validate_manifest/bad_manifest

@@ -0,0 +1,38 @@
+{
+   "schemaVersion": 2,
+   "name": "library/hello-world",
+   "tag": "latest",
+   "architecture": "amd64",
+   "fsLayers": [
+      {
+         "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+      },
+      {
+         "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"
+      }
+   ],
+   "history": [
+      {
+         "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
+      },
+      {
+         "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n"
+      }
+   ],
+   "signatures": [
+      {
+         "header": {
+            "jwk": {
+               "crv": "P-256",
+               "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4",
+               "kty": "EC",
+               "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ",
+               "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8"
+            },
+            "alg": "ES256"
+         },
+         "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A",
+         "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ"
+      }
+   ]
+}

+ 46 - 0
graph/fixtures/validate_manifest/extra_data_manifest

@@ -0,0 +1,46 @@
+{
+   "schemaVersion": 1,
+   "name": "library/hello-world",
+   "tag": "latest",
+   "architecture": "amd64",
+   "fsLayers": [
+      {
+         "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+      },
+      {
+         "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"
+      }
+   ],
+   "history": [
+      {
+         "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
+      },
+      {
+         "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n"
+      }
+   ],
+   "fsLayers": [
+      {
+         "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+      },
+      {
+         "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"
+      }
+   ],
+   "signatures": [
+      {
+         "header": {
+            "jwk": {
+               "crv": "P-256",
+               "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4",
+               "kty": "EC",
+               "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ",
+               "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8"
+            },
+            "alg": "ES256"
+         },
+         "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A",
+         "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ"
+      }
+   ]
+}

+ 38 - 0
graph/fixtures/validate_manifest/good_manifest

@@ -0,0 +1,38 @@
+{
+   "schemaVersion": 1,
+   "name": "library/hello-world",
+   "tag": "latest",
+   "architecture": "amd64",
+   "fsLayers": [
+      {
+         "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+      },
+      {
+         "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"
+      }
+   ],
+   "history": [
+      {
+         "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
+      },
+      {
+         "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n"
+      }
+   ],
+   "signatures": [
+      {
+         "header": {
+            "jwk": {
+               "crv": "P-256",
+               "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4",
+               "kty": "EC",
+               "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ",
+               "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8"
+            },
+            "alg": "ES256"
+         },
+         "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A",
+         "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ"
+      }
+   ]
+}

+ 22 - 0
graph/fixtures/validate_manifest/no_signature_manifest

@@ -0,0 +1,22 @@
+{
+   "schemaVersion": 1,
+   "name": "library/hello-world",
+   "tag": "latest",
+   "architecture": "amd64",
+   "fsLayers": [
+      {
+         "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
+      },
+      {
+         "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"
+      }
+   ],
+   "history": [
+      {
+         "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"
+      },
+      {
+         "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n"
+      }
+   ]
+}

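Note on the fixtures above: they appear to exercise the new manifest validation paths (bad_manifest uses schemaVersion 2, no_signature_manifest lacks a signatures block, and extra_data_manifest repeats the fsLayers key). The duplicated-key case matters because Go's encoding/json keeps the last occurrence of a repeated object key, so the layer list a client decodes can differ from the one covered by the signed payload. A standalone sketch of that behavior, not code from this commit:

// Standalone sketch: encoding/json keeps the LAST value of a duplicated
// JSON key, which is what extra_data_manifest is probing.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	doc := []byte(`{"fsLayers":[{"blobSum":"sha256:aaaa"}],"fsLayers":[{"blobSum":"sha256:ffff"}]}`)
	var m struct {
		FSLayers []struct {
			BlobSum string `json:"blobSum"`
		} `json:"fsLayers"`
	}
	if err := json.Unmarshal(doc, &m); err != nil {
		panic(err)
	}
	// Prints sha256:ffff: the later fsLayers entry silently wins.
	fmt.Println(m.FSLayers[0].BlobSum)
}
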
+ 174 - 38
graph/graph.go

@@ -31,6 +31,26 @@ import (
 	"github.com/vbatts/tar-split/tar/storage"
 )

+// v1Descriptor is a non-content-addressable image descriptor
+type v1Descriptor struct {
+	img *image.Image
+}
+
+// ID returns the image ID specified in the image structure.
+func (img v1Descriptor) ID() string {
+	return img.img.ID
+}
+
+// Parent returns the parent ID specified in the image structure.
+func (img v1Descriptor) Parent() string {
+	return img.img.Parent
+}
+
+// MarshalConfig renders the image structure into JSON.
+func (img v1Descriptor) MarshalConfig() ([]byte, error) {
+	return json.Marshal(img.img)
+}
+
 // The type is used to protect pulling or building related image
 // layers from deleteing when filtered by dangling=true
 // The key of layers is the images ID which is pulling or building
@@ -88,10 +108,12 @@ type Graph struct {

 // file names for ./graph/<ID>/
 const (
-	jsonFileName      = "json"
-	layersizeFileName = "layersize"
-	digestFileName    = "checksum"
-	tarDataFileName   = "tar-data.json.gz"
+	jsonFileName            = "json"
+	layersizeFileName       = "layersize"
+	digestFileName          = "checksum"
+	tarDataFileName         = "tar-data.json.gz"
+	v1CompatibilityFileName = "v1Compatibility"
+	parentFileName          = "parent"
 )

 var (
@@ -225,7 +247,7 @@ func (graph *Graph) Create(layerData io.Reader, containerID, containerImage, com
 		img.ContainerConfig = *containerConfig
 	}

-	if err := graph.Register(img, layerData); err != nil {
+	if err := graph.Register(v1Descriptor{img}, layerData); err != nil {
 		return nil, err
 	}
 	return img, nil
@@ -233,19 +255,26 @@

 // Register imports a pre-existing image into the graph.
 // Returns nil if the image is already registered.
-func (graph *Graph) Register(img *image.Image, layerData io.Reader) (err error) {
+func (graph *Graph) Register(im image.Descriptor, layerData io.Reader) (err error) {
+	imgID := im.ID()

-	if err := image.ValidateID(img.ID); err != nil {
+	if err := image.ValidateID(imgID); err != nil {
 		return err
 	}

 	// We need this entire operation to be atomic within the engine. Note that
 	// this doesn't mean Register is fully safe yet.
-	graph.imageMutex.Lock(img.ID)
-	defer graph.imageMutex.Unlock(img.ID)
+	graph.imageMutex.Lock(imgID)
+	defer graph.imageMutex.Unlock(imgID)
+
+	return graph.register(im, layerData)
+}
+
+func (graph *Graph) register(im image.Descriptor, layerData io.Reader) (err error) {
+	imgID := im.ID()

 	// Skip register if image is already registered
-	if graph.Exists(img.ID) {
+	if graph.Exists(imgID) {
 		return nil
 	}

@@ -255,14 +284,14 @@ func (graph *Graph) Register(img *image.Image, layerData io.Reader) (err error)
 		// If any error occurs, remove the new dir from the driver.
 		// Don't check for errors since the dir might not have been created.
 		if err != nil {
-			graph.driver.Remove(img.ID)
+			graph.driver.Remove(imgID)
 		}
 	}()

 	// Ensure that the image root does not exist on the filesystem
 	// when it is not registered in the graph.
 	// This is common when you switch from one graph driver to another
-	if err := os.RemoveAll(graph.imageRoot(img.ID)); err != nil && !os.IsNotExist(err) {
+	if err := os.RemoveAll(graph.imageRoot(imgID)); err != nil && !os.IsNotExist(err) {
 		return err
 	}

@@ -270,7 +299,7 @@ func (graph *Graph) Register(img *image.Image, layerData io.Reader) (err error)
 	// (the graph is the source of truth).
 	// Ignore errors, since we don't know if the driver correctly returns ErrNotExist.
 	// (FIXME: make that mandatory for drivers).
-	graph.driver.Remove(img.ID)
+	graph.driver.Remove(imgID)

 	tmp, err := graph.mktemp()
 	defer os.RemoveAll(tmp)
@@ -278,26 +307,32 @@ func (graph *Graph) Register(img *image.Image, layerData io.Reader) (err error)
 		return fmt.Errorf("mktemp failed: %s", err)
 	}

+	parent := im.Parent()
+
 	// Create root filesystem in the driver
-	if err := createRootFilesystemInDriver(graph, img); err != nil {
+	if err := createRootFilesystemInDriver(graph, imgID, parent, layerData); err != nil {
 		return err
 	}

 	// Apply the diff/layer
-	if err := graph.storeImage(img, layerData, tmp); err != nil {
+	config, err := im.MarshalConfig()
+	if err != nil {
+		return err
+	}
+	if err := graph.storeImage(imgID, parent, config, layerData, tmp); err != nil {
 		return err
 	}
 	// Commit
-	if err := os.Rename(tmp, graph.imageRoot(img.ID)); err != nil {
+	if err := os.Rename(tmp, graph.imageRoot(imgID)); err != nil {
 		return err
 	}
-	graph.idIndex.Add(img.ID)
+	graph.idIndex.Add(imgID)
 	return nil
 }

-func createRootFilesystemInDriver(graph *Graph, img *image.Image) error {
-	if err := graph.driver.Create(img.ID, img.Parent); err != nil {
-		return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err)
+func createRootFilesystemInDriver(graph *Graph, id, parent string, layerData io.Reader) error {
+	if err := graph.driver.Create(id, parent); err != nil {
+		return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, id, err)
 	}
 	return nil
 }
@@ -480,6 +515,21 @@ func (graph *Graph) loadImage(id string) (*image.Image, error) {
 	if err := dec.Decode(img); err != nil {
 		return nil, err
 	}
+
+	if img.ID == "" {
+		img.ID = id
+	}
+
+	if img.Parent == "" && img.ParentID != "" && img.ParentID.Validate() == nil {
+		img.Parent = img.ParentID.Hex()
+	}
+
+	// compatibilityID for parent
+	parent, err := ioutil.ReadFile(filepath.Join(root, parentFileName))
+	if err == nil && len(parent) > 0 {
+		img.Parent = string(parent)
+	}
+
 	if err := image.ValidateID(img.ID); err != nil {
 		return nil, err
 	}
@@ -513,11 +563,14 @@ func (graph *Graph) saveSize(root string, size int64) error {
 	return nil
 }

-// SetDigest sets the digest for the image layer to the provided value.
-func (graph *Graph) SetDigest(id string, dgst digest.Digest) error {
+// SetLayerDigest sets the digest for the image layer to the provided value.
+func (graph *Graph) SetLayerDigest(id string, dgst digest.Digest) error {
 	graph.imageMutex.Lock(id)
 	defer graph.imageMutex.Unlock(id)

+	return graph.setLayerDigest(id, dgst)
+}
+func (graph *Graph) setLayerDigest(id string, dgst digest.Digest) error {
 	root := graph.imageRoot(id)
 	if err := ioutil.WriteFile(filepath.Join(root, digestFileName), []byte(dgst.String()), 0600); err != nil {
 		return fmt.Errorf("Error storing digest in %s/%s: %s", root, digestFileName, err)
@@ -525,11 +578,15 @@ func (graph *Graph) SetDigest(id string, dgst digest.Digest) error {
 	return nil
 }

-// GetDigest gets the digest for the provide image layer id.
-func (graph *Graph) GetDigest(id string) (digest.Digest, error) {
+// GetLayerDigest gets the digest for the provide image layer id.
+func (graph *Graph) GetLayerDigest(id string) (digest.Digest, error) {
 	graph.imageMutex.Lock(id)
 	defer graph.imageMutex.Unlock(id)

+	return graph.getLayerDigest(id)
+}
+
+func (graph *Graph) getLayerDigest(id string) (digest.Digest, error) {
 	root := graph.imageRoot(id)
 	cs, err := ioutil.ReadFile(filepath.Join(root, digestFileName))
 	if err != nil {
@@ -541,6 +598,76 @@ func (graph *Graph) GetDigest(id string) (digest.Digest, error) {
 	return digest.ParseDigest(string(cs))
 }

+// SetV1CompatibilityConfig stores the v1Compatibility JSON data associated
+// with the image in the manifest to the disk
+func (graph *Graph) SetV1CompatibilityConfig(id string, data []byte) error {
+	graph.imageMutex.Lock(id)
+	defer graph.imageMutex.Unlock(id)
+
+	return graph.setV1CompatibilityConfig(id, data)
+}
+func (graph *Graph) setV1CompatibilityConfig(id string, data []byte) error {
+	root := graph.imageRoot(id)
+	return ioutil.WriteFile(filepath.Join(root, v1CompatibilityFileName), data, 0600)
+}
+
+// GetV1CompatibilityConfig reads the v1Compatibility JSON data for the image
+// from the disk
+func (graph *Graph) GetV1CompatibilityConfig(id string) ([]byte, error) {
+	graph.imageMutex.Lock(id)
+	defer graph.imageMutex.Unlock(id)
+
+	return graph.getV1CompatibilityConfig(id)
+}
+
+func (graph *Graph) getV1CompatibilityConfig(id string) ([]byte, error) {
+	root := graph.imageRoot(id)
+	return ioutil.ReadFile(filepath.Join(root, v1CompatibilityFileName))
+}
+
+// GenerateV1CompatibilityChain makes sure v1Compatibility JSON data exists
+// for the image. If it doesn't it generates and stores it for the image and
+// all of it's parents based on the image config JSON.
+func (graph *Graph) GenerateV1CompatibilityChain(id string) ([]byte, error) {
+	graph.imageMutex.Lock(id)
+	defer graph.imageMutex.Unlock(id)
+
+	if v1config, err := graph.getV1CompatibilityConfig(id); err == nil {
+		return v1config, nil
+	}
+
+	// generate new, store it to disk
+	img, err := graph.Get(id)
+	if err != nil {
+		return nil, err
+	}
+
+	digestPrefix := string(digest.Canonical) + ":"
+	img.ID = strings.TrimPrefix(img.ID, digestPrefix)
+
+	if img.Parent != "" {
+		parentConfig, err := graph.GenerateV1CompatibilityChain(img.Parent)
+		if err != nil {
+			return nil, err
+		}
+		var parent struct{ ID string }
+		err = json.Unmarshal(parentConfig, &parent)
+		if err != nil {
+			return nil, err
+		}
+		img.Parent = parent.ID
+	}
+
+	json, err := json.Marshal(img)
+	if err != nil {
+		return nil, err
+	}
+	if err := graph.setV1CompatibilityConfig(id, json); err != nil {
+		return nil, err
+	}
+	return json, nil
+}
+
 // RawJSON returns the JSON representation for an image as a byte array.
 func (graph *Graph) RawJSON(id string) ([]byte, error) {
 	root := graph.imageRoot(id)
@@ -560,29 +687,38 @@ func jsonPath(root string) string {
 // storeImage stores file system layer data for the given image to the
 // graph's storage driver. Image metadata is stored in a file
 // at the specified root directory.
-func (graph *Graph) storeImage(img *image.Image, layerData io.Reader, root string) (err error) {
+func (graph *Graph) storeImage(id, parent string, config []byte, layerData io.Reader, root string) (err error) {
+	var size int64
 	// Store the layer. If layerData is not nil, unpack it into the new layer
 	if layerData != nil {
-		if err := graph.disassembleAndApplyTarLayer(img, layerData, root); err != nil {
+		if size, err = graph.disassembleAndApplyTarLayer(id, parent, layerData, root); err != nil {
 			return err
 		}
 	}

-	if err := graph.saveSize(root, img.Size); err != nil {
+	if err := graph.saveSize(root, size); err != nil {
 		return err
 	}

-	f, err := os.OpenFile(jsonPath(root), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
-	if err != nil {
+	if err := ioutil.WriteFile(jsonPath(root), config, 0600); err != nil {
 		return err
 	}

-	defer f.Close()
+	// If image is pointing to a parent via CompatibilityID write the reference to disk
+	img, err := image.NewImgJSON(config)
+	if err != nil {
+		return err
+	}

-	return json.NewEncoder(f).Encode(img)
+	if img.ParentID.Validate() == nil && parent != img.ParentID.Hex() {
+		if err := ioutil.WriteFile(filepath.Join(root, parentFileName), []byte(parent), 0600); err != nil {
+			return err
+		}
+	}
+	return nil
 }

-func (graph *Graph) disassembleAndApplyTarLayer(img *image.Image, layerData io.Reader, root string) (err error) {
+func (graph *Graph) disassembleAndApplyTarLayer(id, parent string, layerData io.Reader, root string) (size int64, err error) {
 	var ar io.Reader

 	if graph.tarSplitDisabled {
@@ -591,7 +727,7 @@ func (graph *Graph) disassembleAndApplyTarLayer(img *image.Image, layerData io.R
 		// this is saving the tar-split metadata
 		mf, err := os.OpenFile(filepath.Join(root, tarDataFileName), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
 		if err != nil {
-			return err
+			return 0, err
 		}

 		mfz := gzip.NewWriter(mf)
@@ -601,24 +737,24 @@

 		inflatedLayerData, err := archive.DecompressStream(layerData)
 		if err != nil {
-			return err
+			return 0, err
 		}

 		// we're passing nil here for the file putter, because the ApplyDiff will
 		// handle the extraction of the archive
 		rdr, err := asm.NewInputTarStream(inflatedLayerData, metaPacker, nil)
 		if err != nil {
-			return err
+			return 0, err
 		}

 		ar = archive.Reader(rdr)
 	}

-	if img.Size, err = graph.driver.ApplyDiff(img.ID, img.Parent, ar); err != nil {
-		return err
+	if size, err = graph.driver.ApplyDiff(id, parent, ar); err != nil {
+		return 0, err
 	}

-	return nil
+	return
 }

 func (graph *Graph) assembleTarLayer(img *image.Image) (io.ReadCloser, error) {

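One structural note on the graph.go changes above: each exported method (Register, SetLayerDigest, SetV1CompatibilityConfig) now only takes the per-image lock and then delegates to an unexported counterpart (register, setLayerDigest, setV1CompatibilityConfig). That lets the v2 puller, shown further down, hold graph.imageMutex across a whole sequence of operations on one image (re-validating an existing image, registering it, writing its digest and v1Compatibility data) without re-acquiring the lock the exported methods would take. A minimal sketch of the pattern with hypothetical names (store/SetMeta are not from the commit):

// Minimal sketch of the lock-wrapping pattern used in graph.go above.
// In Docker, graph.imageMutex is a keyed lock taking an image ID; a plain
// sync.Mutex stands in for it here.
package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu   sync.Mutex
	meta map[string]string
}

// SetMeta is the exported entry point: it takes the lock, then delegates.
func (s *store) SetMeta(id, v string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.setMeta(id, v)
}

// setMeta assumes the caller already holds the lock, so a larger operation
// (like the v2 pull's register + setLayerDigest + setV1CompatibilityConfig
// sequence) can call several helpers inside one critical section.
func (s *store) setMeta(id, v string) {
	if s.meta == nil {
		s.meta = map[string]string{}
	}
	s.meta[id] = v
}

func main() {
	s := &store{}
	s.SetMeta("abc", "digest")
	fmt.Println(s.meta["abc"])
}
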
+ 17 - 7
graph/graph_test.go

@@ -73,7 +73,7 @@ func TestInterruptedRegister(t *testing.T) {
 		Created: time.Now(),
 	}
 	w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling)
-	graph.Register(image, badArchive)
+	graph.Register(v1Descriptor{image}, badArchive)
 	if _, err := graph.Get(image.ID); err == nil {
 		t.Fatal("Image should not exist after Register is interrupted")
 	}
@@ -82,7 +82,7 @@ func TestInterruptedRegister(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err := graph.Register(image, goodArchive); err != nil {
+	if err := graph.Register(v1Descriptor{image}, goodArchive); err != nil {
 		t.Fatal(err)
 	}
 }
@@ -130,7 +130,7 @@ func TestRegister(t *testing.T) {
 		Comment: "testing",
 		Created: time.Now(),
 	}
-	err = graph.Register(image, archive)
+	err = graph.Register(v1Descriptor{image}, archive)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -212,7 +212,7 @@ func TestDelete(t *testing.T) {
 		t.Fatal(err)
 	}
 	// Test delete twice (pull -> rm -> pull -> rm)
-	if err := graph.Register(img1, archive); err != nil {
+	if err := graph.Register(v1Descriptor{img1}, archive); err != nil {
 		t.Fatal(err)
 	}
 	if err := graph.Delete(img1.ID); err != nil {
@@ -246,9 +246,19 @@ func TestByParent(t *testing.T) {
 		Created: time.Now(),
 		Parent:  parentImage.ID,
 	}
-	_ = graph.Register(parentImage, archive1)
-	_ = graph.Register(childImage1, archive2)
-	_ = graph.Register(childImage2, archive3)
+
+	err := graph.Register(v1Descriptor{parentImage}, archive1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = graph.Register(v1Descriptor{childImage1}, archive2)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = graph.Register(v1Descriptor{childImage2}, archive3)
+	if err != nil {
+		t.Fatal(err)
+	}

 	byParent := graph.ByParent()
 	numChildren := len(byParent[parentImage.ID])

+ 1 - 1
graph/load.go

@@ -122,7 +122,7 @@ func (s *TagStore) recursiveLoad(address, tmpImageDir string) error {
 				}
 			}
 		}
-		if err := s.graph.Register(img, layer); err != nil {
+		if err := s.graph.Register(v1Descriptor{img}, layer); err != nil {
 			return err
 		}
 	}

+ 0 - 6
graph/pull.go

@@ -101,12 +101,6 @@ func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConf
 	for _, endpoint := range endpoints {
 		logrus.Debugf("Trying to pull %s from %s %s", repoInfo.LocalName, endpoint.URL, endpoint.Version)

-		if !endpoint.Mirror && (endpoint.Official || endpoint.Version == registry.APIVersion2) {
-			if repoInfo.Official {
-				s.trustService.UpdateBase()
-			}
-		}
-
 		puller, err := NewPuller(s, endpoint, repoInfo, imagePullConfig, sf)
 		if err != nil {
 			lastErr = err

+ 8 - 3
graph/pull_v1.go

@@ -127,7 +127,7 @@ func (p *v1Puller) pullRepository(askedTag string) error {
 	defer func() {
 		p.graph.Release(sessionID, imgIDs...)
 	}()
-	for _, image := range repoData.ImgList {
+	for _, imgData := range repoData.ImgList {
 		downloadImage := func(img *registry.ImgData) {
 			if askedTag != "" && img.Tag != askedTag {
 				errors <- nil
@@ -140,6 +140,11 @@ func (p *v1Puller) pullRepository(askedTag string) error {
 				return
 			}

+			if err := image.ValidateID(img.ID); err != nil {
+				errors <- err
+				return
+			}
+
 			// ensure no two downloads of the same image happen at the same time
 			poolKey := "img:" + img.ID
 			broadcaster, found := p.poolAdd("pull", poolKey)
@@ -197,7 +202,7 @@ func (p *v1Puller) pullRepository(askedTag string) error {
 			errors <- nil
 		}

-		go downloadImage(image)
+		go downloadImage(imgData)
 	}

 	var lastError error
@@ -317,7 +322,7 @@ func (p *v1Puller) pullImage(out io.Writer, imgID, endpoint string) (layersDownl
 				layersDownloaded = true
 				defer layer.Close()

-				err = p.graph.Register(img,
+				err = p.graph.Register(v1Descriptor{img},
 					progressreader.New(progressreader.Config{
 						In:        layer,
 						Out:       broadcaster,

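The image.ValidateID check added above matters because img.ID arrives straight from registry data and is then used in the pull pool key ("img:" + img.ID) and eventually in on-disk graph paths. ValidateID itself is not part of this diff; in Docker of this era it is essentially a check that the ID is a 64-character lowercase hex string, roughly like the sketch below (validateID here is a stand-in, not the real function):

// Rough sketch of the kind of check image.ValidateID performs; the real
// implementation lives in image/image.go and is not shown in this diff.
package main

import (
	"fmt"
	"regexp"
)

var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)

func validateID(id string) error {
	if !validHex.MatchString(id) {
		return fmt.Errorf("image ID '%s' is invalid", id)
	}
	return nil
}

func main() {
	// Rejected: not 64 hex characters, so it can never become a path component.
	fmt.Println(validateID("../../../etc"))
	// Accepted: a well-formed v1 image ID.
	fmt.Println(validateID("af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c"))
}
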
+ 375 - 103
graph/pull_v2.go

@@ -1,10 +1,13 @@
 package graph

 import (
+	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
+	"sync"

 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution"
@@ -16,9 +19,7 @@ import (
 	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/registry"
-	"github.com/docker/docker/trust"
 	"github.com/docker/docker/utils"
-	"github.com/docker/libtrust"
 	"golang.org/x/net/context"
 )

@@ -73,7 +74,8 @@ func (p *v2Puller) pullV2Repository(tag string) (err error) {

 	}

-	broadcaster, found := p.poolAdd("pull", taggedName)
+	poolKey := "v2:" + taggedName
+	broadcaster, found := p.poolAdd("pull", poolKey)
 	broadcaster.Add(p.config.OutStream)
 	if found {
 		// Another pull of the same repository is already taking place; just wait for it to finish
@@ -83,7 +85,7 @@ func (p *v2Puller) pullV2Repository(tag string) (err error) {
 	// This must use a closure so it captures the value of err when the
 	// function returns, not when the 'defer' is evaluated.
 	defer func() {
-		p.poolRemoveWithError("pull", taggedName, err)
+		p.poolRemoveWithError("pull", poolKey, err)
 	}()

 	var layersDownloaded bool
@@ -104,7 +106,8 @@ func (p *v2Puller) pullV2Repository(tag string) (err error) {

 // downloadInfo is used to pass information from download to extractor
 type downloadInfo struct {
-	img         *image.Image
+	img         contentAddressableDescriptor
+	imgIndex    int
 	tmpFile     *os.File
 	digest      digest.Digest
 	layer       distribution.ReadSeekCloser
@@ -114,12 +117,66 @@ type downloadInfo struct {
 	broadcaster *broadcaster.Buffered
 }

+// contentAddressableDescriptor is used to pass image data from a manifest to the
+// graph.
+type contentAddressableDescriptor struct {
+	id              string
+	parent          string
+	strongID        digest.Digest
+	compatibilityID string
+	config          []byte
+	v1Compatibility []byte
+}
+
+func newContentAddressableImage(v1Compatibility []byte, blobSum digest.Digest, parent digest.Digest) (contentAddressableDescriptor, error) {
+	img := contentAddressableDescriptor{
+		v1Compatibility: v1Compatibility,
+	}
+
+	var err error
+	img.config, err = image.MakeImageConfig(v1Compatibility, blobSum, parent)
+	if err != nil {
+		return img, err
+	}
+	img.strongID, err = image.StrongID(img.config)
+	if err != nil {
+		return img, err
+	}
+
+	unmarshalledConfig, err := image.NewImgJSON(v1Compatibility)
+	if err != nil {
+		return img, err
+	}
+
+	img.compatibilityID = unmarshalledConfig.ID
+	img.id = img.strongID.Hex()
+
+	return img, nil
+}
+
+// ID returns the actual ID to be used for the downloaded image. This may be
+// a computed ID.
+func (img contentAddressableDescriptor) ID() string {
+	return img.id
+}
+
+// Parent returns the parent ID to be used for the image. This may be a
+// computed ID.
+func (img contentAddressableDescriptor) Parent() string {
+	return img.parent
+}
+
+// MarshalConfig renders the image structure into JSON.
+func (img contentAddressableDescriptor) MarshalConfig() ([]byte, error) {
+	return img.config, nil
+}
+
 type errVerification struct{}

 func (errVerification) Error() string { return "verification failed" }

 func (p *v2Puller) download(di *downloadInfo) {
-	logrus.Debugf("pulling blob %q to %s", di.digest, di.img.ID)
+	logrus.Debugf("pulling blob %q to %s", di.digest, di.img.id)

 	blobs := p.repo.Blobs(context.Background())

@@ -151,12 +208,12 @@ func (p *v2Puller) download(di *downloadInfo) {
 		Formatter: p.sf,
 		Size:      di.size,
 		NewLines:  false,
-		ID:        stringid.TruncateID(di.img.ID),
+		ID:        stringid.TruncateID(di.img.id),
 		Action:    "Downloading",
 	})
 	io.Copy(di.tmpFile, reader)

-	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Verifying Checksum", nil))
+	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.id), "Verifying Checksum", nil))

 	if !verifier.Verified() {
 		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
@@ -165,15 +222,15 @@ func (p *v2Puller) download(di *downloadInfo) {
 		return
 	}

-	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))
+	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.id), "Download complete", nil))

-	logrus.Debugf("Downloaded %s to tempfile %s", di.img.ID, di.tmpFile.Name())
+	logrus.Debugf("Downloaded %s to tempfile %s", di.img.id, di.tmpFile.Name())
 	di.layer = layerDownload

 	di.err <- nil
 }

-func (p *v2Puller) pullV2Tag(out io.Writer, tag, taggedName string) (verified bool, err error) {
+func (p *v2Puller) pullV2Tag(out io.Writer, tag, taggedName string) (tagUpdated bool, err error) {
 	logrus.Debugf("Pulling tag from V2 registry: %q", tag)

 	manSvc, err := p.repo.Manifests(context.Background())
@@ -181,16 +238,28 @@ func (p *v2Puller) pullV2Tag(out io.Writer, tag, taggedName string) (verified bo
 		return false, err
 	}

-	manifest, err := manSvc.GetByTag(tag)
+	unverifiedManifest, err := manSvc.GetByTag(tag)
 	if err != nil {
 		return false, err
 	}
-	verified, err = p.validateManifest(manifest, tag)
+	if unverifiedManifest == nil {
+		return false, fmt.Errorf("image manifest does not exist for tag %q", tag)
+	}
+	var verifiedManifest *manifest.Manifest
+	verifiedManifest, err = verifyManifest(unverifiedManifest, tag)
+	if err != nil {
+		return false, err
+	}
+
+	// remove duplicate layers and check parent chain validity
+	err = fixManifestLayers(verifiedManifest)
 	if err != nil {
 		return false, err
 	}
-	if verified {
-		logrus.Printf("Image manifest for %s has been verified", taggedName)
+
+	imgs, err := p.getImageInfos(verifiedManifest)
+	if err != nil {
+		return false, err
 	}

 	out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name()))
@@ -212,27 +281,33 @@ func (p *v2Puller) pullV2Tag(out io.Writer, tag, taggedName string) (verified bo
 		}
 	}()

-	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
-		img, err := image.NewImgJSON([]byte(manifest.History[i].V1Compatibility))
-		if err != nil {
-			logrus.Debugf("error getting image v1 json: %v", err)
-			return false, err
-		}
-		p.graph.Retain(p.sessionID, img.ID)
-		layerIDs = append(layerIDs, img.ID)
+	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
+		img := imgs[i]
+
+		p.graph.Retain(p.sessionID, img.id)
+		layerIDs = append(layerIDs, img.id)
+
+		p.graph.imageMutex.Lock(img.id)

 		// Check if exists
-		if p.graph.Exists(img.ID) {
-			logrus.Debugf("Image already exists: %s", img.ID)
-			out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Already exists", nil))
+		if p.graph.Exists(img.id) {
+			if err := p.validateImageInGraph(img.id, imgs, i); err != nil {
+				p.graph.imageMutex.Unlock(img.id)
+				return false, fmt.Errorf("image validation failed: %v", err)
+			}
+			logrus.Debugf("Image already exists: %s", img.id)
+			p.graph.imageMutex.Unlock(img.id)
 			continue
 		}
-		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))
+		p.graph.imageMutex.Unlock(img.id)
+
+		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.id), "Pulling fs layer", nil))

 		d := &downloadInfo{
-			img:     img,
-			poolKey: "layer:" + img.ID,
-			digest:  manifest.FSLayers[i].BlobSum,
+			img:      img,
+			imgIndex: i,
+			poolKey:  "v2layer:" + img.id,
+			digest:   verifiedManifest.FSLayers[i].BlobSum,
 			// TODO: seems like this chan buffer solved hanging problem in go1.5,
 			// this can indicate some deeper problem that somehow we never take
 			// error from channel in loop below
@@ -257,7 +332,6 @@ func (p *v2Puller) pullV2Tag(out io.Writer, tag, taggedName string) (verified bo
 		}
 	}

-	var tagUpdated bool
 	for _, d := range downloads {
 		if err := <-d.err; err != nil {
 			return false, err
@@ -274,31 +348,54 @@ func (p *v2Puller) pullV2Tag(out io.Writer, tag, taggedName string) (verified bo
 		}

 		d.tmpFile.Seek(0, 0)
-		reader := progressreader.New(progressreader.Config{
-			In:        d.tmpFile,
-			Out:       d.broadcaster,
-			Formatter: p.sf,
-			Size:      d.size,
-			NewLines:  false,
-			ID:        stringid.TruncateID(d.img.ID),
-			Action:    "Extracting",
-		})
-
-		err = p.graph.Register(d.img, reader)
-		if err != nil {
-			return false, err
-		}
+		err := func() error {
+			reader := progressreader.New(progressreader.Config{
+				In:        d.tmpFile,
+				Out:       d.broadcaster,
+				Formatter: p.sf,
+				Size:      d.size,
+				NewLines:  false,
+				ID:        stringid.TruncateID(d.img.id),
+				Action:    "Extracting",
+			})
+
+			p.graph.imageMutex.Lock(d.img.id)
+			defer p.graph.imageMutex.Unlock(d.img.id)
+
+			// Must recheck the data on disk if any exists.
+			// This protects against races where something
+			// else is written to the graph under this ID
+			// after attemptIDReuse.
+			if p.graph.Exists(d.img.id) {
+				if err := p.validateImageInGraph(d.img.id, imgs, d.imgIndex); err != nil {
+					return fmt.Errorf("image validation failed: %v", err)
+				}
+			}
+
+			if err := p.graph.register(d.img, reader); err != nil {
+				return err
+			}
+
+			if err := p.graph.setLayerDigest(d.img.id, d.digest); err != nil {
+				return err
+			}
+
+			if err := p.graph.setV1CompatibilityConfig(d.img.id, d.img.v1Compatibility); err != nil {
+				return err
+			}

-		if err := p.graph.SetDigest(d.img.ID, d.digest); err != nil {
+			return nil
+		}()
+		if err != nil {
 			return false, err
 		}

-		d.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
+		d.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.id), "Pull complete", nil))
 		d.broadcaster.Close()
 		tagUpdated = true
 	}

-	manifestDigest, _, err := digestFromManifest(manifest, p.repoInfo.LocalName)
+	manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo.LocalName)
 	if err != nil {
 		return false, err
 	}
@@ -318,10 +415,6 @@
 		}
 	}

-	if verified && tagUpdated {
-		out.Write(p.sf.FormatStatus(p.repo.Name()+":"+tag, "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
-	}
-
 	firstID := layerIDs[len(layerIDs)-1]
 	if utils.DigestReference(tag) {
 		// TODO(stevvooe): Ideally, we should always set the digest so we can
@@ -345,82 +438,261 @@ func (p *v2Puller) pullV2Tag(out io.Writer, tag, taggedName string) (verified bo
 	return tagUpdated, nil
 	return tagUpdated, nil
 }
 }
 
 
-// verifyTrustedKeys checks the keys provided against the trust store,
-// ensuring that the provided keys are trusted for the namespace. The keys
-// provided from this method must come from the signatures provided as part of
-// the manifest JWS package, obtained from unpackSignedManifest or libtrust.
-func (p *v2Puller) verifyTrustedKeys(namespace string, keys []libtrust.PublicKey) (verified bool, err error) {
-	if namespace[0] != '/' {
-		namespace = "/" + namespace
-	}
-
-	for _, key := range keys {
-		b, err := key.MarshalJSON()
-		if err != nil {
-			return false, fmt.Errorf("error marshalling public key: %s", err)
-		}
-		// Check key has read/write permission (0x03)
-		v, err := p.trustService.CheckKey(namespace, b, 0x03)
-		if err != nil {
-			vErr, ok := err.(trust.NotVerifiedError)
-			if !ok {
-				return false, fmt.Errorf("error running key check: %s", err)
-			}
-			logrus.Debugf("Key check result: %v", vErr)
-		}
-		verified = v
-	}
-
-	if verified {
-		logrus.Debug("Key check result: verified")
-	}
-
-	return
-}
-
-func (p *v2Puller) validateManifest(m *manifest.SignedManifest, tag string) (verified bool, err error) {
+func verifyManifest(signedManifest *manifest.SignedManifest, tag string) (m *manifest.Manifest, err error) {
 	// If pull by digest, then verify the manifest digest. NOTE: It is
 	// If pull by digest, then verify the manifest digest. NOTE: It is
 	// important to do this first, before any other content validation. If the
 	// important to do this first, before any other content validation. If the
 	// digest cannot be verified, don't even bother with those other things.
 	// digest cannot be verified, don't even bother with those other things.
 	if manifestDigest, err := digest.ParseDigest(tag); err == nil {
 	if manifestDigest, err := digest.ParseDigest(tag); err == nil {
 		verifier, err := digest.NewDigestVerifier(manifestDigest)
 		verifier, err := digest.NewDigestVerifier(manifestDigest)
 		if err != nil {
 		if err != nil {
-			return false, err
+			return nil, err
 		}
 		}
-		payload, err := m.Payload()
+		payload, err := signedManifest.Payload()
 		if err != nil {
 		if err != nil {
-			return false, err
+			// If this failed, the signatures section was corrupted
+			// or missing. Treat the entire manifest as the payload.
+			payload = signedManifest.Raw
 		}
 		}
 		if _, err := verifier.Write(payload); err != nil {
 		if _, err := verifier.Write(payload); err != nil {
-			return false, err
+			return nil, err
 		}
 		}
 		if !verifier.Verified() {
 		if !verifier.Verified() {
 			err := fmt.Errorf("image verification failed for digest %s", manifestDigest)
 			err := fmt.Errorf("image verification failed for digest %s", manifestDigest)
 			logrus.Error(err)
 			logrus.Error(err)
-			return false, err
+			return nil, err
 		}
 		}
-	}
 
 
-	// TODO(tiborvass): what's the usecase for having manifest == nil and err == nil ? Shouldn't be the error be "DoesNotExist" ?
-	if m == nil {
-		return false, fmt.Errorf("image manifest does not exist for tag %q", tag)
+		var verifiedManifest manifest.Manifest
+		if err = json.Unmarshal(payload, &verifiedManifest); err != nil {
+			return nil, err
+		}
+		m = &verifiedManifest
+	} else {
+		m = &signedManifest.Manifest
 	}
 	}
+
 	if m.SchemaVersion != 1 {
 	if m.SchemaVersion != 1 {
-		return false, fmt.Errorf("unsupported schema version %d for tag %q", m.SchemaVersion, tag)
+		return nil, fmt.Errorf("unsupported schema version %d for tag %q", m.SchemaVersion, tag)
 	}
 	}
 	if len(m.FSLayers) != len(m.History) {
 	if len(m.FSLayers) != len(m.History) {
-		return false, fmt.Errorf("length of history not equal to number of layers for tag %q", tag)
+		return nil, fmt.Errorf("length of history not equal to number of layers for tag %q", tag)
 	}
 	}
 	if len(m.FSLayers) == 0 {
 	if len(m.FSLayers) == 0 {
-		return false, fmt.Errorf("no FSLayers in manifest for tag %q", tag)
+		return nil, fmt.Errorf("no FSLayers in manifest for tag %q", tag)
+	}
+	return m, nil
+}
+
+// fixManifestLayers removes repeated layers from the manifest and checks the
+// correctness of the parent chain.
+func fixManifestLayers(m *manifest.Manifest) error {
+	images := make([]*image.Image, len(m.FSLayers))
+	for i := range m.FSLayers {
+		img, err := image.NewImgJSON([]byte(m.History[i].V1Compatibility))
+		if err != nil {
+			return err
+		}
+		images[i] = img
+		if err := image.ValidateID(img.ID); err != nil {
+			return err
+		}
+	}
+
+	if images[len(images)-1].Parent != "" {
+		return errors.New("Invalid parent ID in the base layer of the image.")
+	}
+
+	// check for duplicate IDs up front so we error out instead of deadlocking
+	idmap := make(map[string]struct{})
+
+	var lastID string
+	for _, img := range images {
+		// skip IDs that appear after each other, we handle those later
+		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
+			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
+		}
+		lastID = img.ID
+		idmap[lastID] = struct{}{}
+	}
+
+	// backwards loop so that we keep the remaining indexes after removing items
+	for i := len(images) - 2; i >= 0; i-- {
+		if images[i].ID == images[i+1].ID { // repeated ID. remove and continue
+			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
+			m.History = append(m.History[:i], m.History[i+1:]...)
+		} else if images[i].Parent != images[i+1].ID {
+			return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", images[i+1].ID, images[i].Parent)
+		}
 	}
 	}
-	keys, err := manifest.Verify(m)
+
+	return nil
+}
+
+// getImageInfos returns an imageinfo struct for every image in the manifest.
+// These objects contain both calculated strongIDs and compatibilityIDs found
+// in v1Compatibility object.
+func (p *v2Puller) getImageInfos(m *manifest.Manifest) ([]contentAddressableDescriptor, error) {
+	imgs := make([]contentAddressableDescriptor, len(m.FSLayers))
+
+	var parent digest.Digest
+	for i := len(imgs) - 1; i >= 0; i-- {
+		var err error
+		imgs[i], err = newContentAddressableImage([]byte(m.History[i].V1Compatibility), m.FSLayers[i].BlobSum, parent)
+		if err != nil {
+			return nil, err
+		}
+		parent = imgs[i].strongID
+	}
+
+	p.attemptIDReuse(imgs)
+
+	return imgs, nil
+}
+
+var idReuseLock sync.Mutex
+
+// attemptIDReuse does a best attempt to match verified compatibilityIDs
+// already in the graph with the computed strongIDs so we can keep using them.
+// This process will never fail but may just return the strongIDs if none of
+// the compatibilityIDs exists or can be verified. If the strongIDs themselves
+// fail verification, we deterministically generate alternate IDs to use until
+// we find one that's available or already exists with the correct data.
+func (p *v2Puller) attemptIDReuse(imgs []contentAddressableDescriptor) {
+	// This function needs to be protected with a global lock, because it
+	// locks multiple IDs at once, and there's no good way to make sure
+	// the locking happens in a deterministic order.
+	idReuseLock.Lock()
+	defer idReuseLock.Unlock()
+
+	idMap := make(map[string]struct{})
+	for _, img := range imgs {
+		idMap[img.id] = struct{}{}
+		idMap[img.compatibilityID] = struct{}{}
+
+		if p.graph.Exists(img.compatibilityID) {
+			if _, err := p.graph.GenerateV1CompatibilityChain(img.compatibilityID); err != nil {
+				logrus.Debugf("Migration v1Compatibility generation error: %v", err)
+				return
+			}
+		}
+	}
+	for id := range idMap {
+		p.graph.imageMutex.Lock(id)
+		defer p.graph.imageMutex.Unlock(id)
+	}
+
+	// continueReuse controls whether the function will try to find
+	// existing layers on disk under the old v1 IDs, to avoid repulling
+	// them. The hashes are checked to ensure these layers are okay to
+	// use. continueReuse starts out as true, but is set to false if
+	// the code encounters something that doesn't match the expected hash.
+	continueReuse := true
+
+	for i := len(imgs) - 1; i >= 0; i-- {
+		if p.graph.Exists(imgs[i].id) {
+			// Found an image in the graph under the strongID. Validate the
+			// image before using it.
+			if err := p.validateImageInGraph(imgs[i].id, imgs, i); err != nil {
+				continueReuse = false
+				logrus.Debugf("not using existing strongID: %v", err)
+
+				// The strong ID existed in the graph but didn't
+				// validate successfully, so we can't use it. Treat the
+				// graph like a hash table with probing... compute
+				// SHA256(id) until we find an ID that either doesn't
+				// already exist in the graph, or has existing content
+				// that validates successfully.
+				for {
+					if err := p.tryNextID(imgs, i, idMap); err != nil {
+						logrus.Debug(err.Error())
+					} else {
+						break
+					}
+				}
+			}
+			continue
+		}
+
+		if continueReuse {
+			compatibilityID := imgs[i].compatibilityID
+			if err := p.validateImageInGraph(compatibilityID, imgs, i); err != nil {
+				logrus.Debugf("stopping ID reuse: %v", err)
+				continueReuse = false
+			} else {
+				// The compatibility ID exists in the graph and was
+				// validated. Use it.
+				imgs[i].id = compatibilityID
+			}
+		}
+	}
+
+	// fix up the parents of the images
+	for i := 0; i < len(imgs); i++ {
+		if i == len(imgs)-1 { // Base layer
+			imgs[i].parent = ""
+		} else {
+			imgs[i].parent = imgs[i+1].id
+		}
+	}
+}
+
+// validateImageInGraph checks that an image in the graph has the expected
+// strongID. id is the entry in the graph to check, imgs is the slice of
+// images being processed (for access to the parent), and i is the index
+// into this slice which the graph entry should be checked against.
+func (p *v2Puller) validateImageInGraph(id string, imgs []contentAddressableDescriptor, i int) error {
+	img, err := p.graph.Get(id)
 	if err != nil {
 	if err != nil {
-		return false, fmt.Errorf("error verifying manifest for tag %q: %v", tag, err)
+		return fmt.Errorf("missing: %v", err)
 	}
 	}
-	verified, err = p.verifyTrustedKeys(m.Name, keys)
+	layerID, err := p.graph.getLayerDigest(id)
 	if err != nil {
 	if err != nil {
-		return false, fmt.Errorf("error verifying manifest keys: %v", err)
+		return fmt.Errorf("digest: %v", err)
 	}
 	}
-	return verified, nil
+	var parentID digest.Digest
+	if i != len(imgs)-1 {
+		if img.Parent != imgs[i+1].id { // comparing that graph points to validated ID
+			return fmt.Errorf("parent: %v %v", img.Parent, imgs[i+1].id)
+		}
+		parentID = imgs[i+1].strongID
+	} else if img.Parent != "" {
+		return fmt.Errorf("unexpected parent: %v", img.Parent)
+	}
+
+	v1Config, err := p.graph.getV1CompatibilityConfig(img.ID)
+	if err != nil {
+		return fmt.Errorf("v1Compatibility: %v %v", img.ID, err)
+	}
+
+	json, err := image.MakeImageConfig(v1Config, layerID, parentID)
+	if err != nil {
+		return fmt.Errorf("make config: %v", err)
+	}
+
+	if dgst, err := image.StrongID(json); err == nil && dgst == imgs[i].strongID {
+		logrus.Debugf("Validated %v as %v", dgst, id)
+	} else {
+		return fmt.Errorf("digest mismatch: %v %v, error: %v", dgst, imgs[i].strongID, err)
+	}
+
+	// All clear
+	return nil
+}
+
+func (p *v2Puller) tryNextID(imgs []contentAddressableDescriptor, i int, idMap map[string]struct{}) error {
+	nextID, _ := digest.FromBytes([]byte(imgs[i].id))
+	imgs[i].id = nextID.Hex()
+
+	if _, exists := idMap[imgs[i].id]; !exists {
+		p.graph.imageMutex.Lock(imgs[i].id)
+		defer p.graph.imageMutex.Unlock(imgs[i].id)
+	}
+
+	if p.graph.Exists(imgs[i].id) {
+		if err := p.validateImageInGraph(imgs[i].id, imgs, i); err != nil {
+			return fmt.Errorf("not using existing strongID permutation %s: %v", imgs[i].id, err)
+		}
+	}
+	return nil
 }
 }
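
The conflict-resolution path above deserves a concrete illustration: when a computed strong ID already exists in the graph but fails validation, tryNextID probes for an alternate ID by hashing the current one, so repeated pulls land on the same replacement ID. Below is a minimal standalone sketch of that derivation using only the standard library; the helper name is hypothetical, and the real code goes through the vendored digest package rather than crypto/sha256 directly.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// nextCandidateID mirrors the probing step in tryNextID: the next candidate
// for a conflicting image ID is the SHA-256 of the current ID's hex string,
// re-encoded as hex.
func nextCandidateID(id string) string {
	sum := sha256.Sum256([]byte(id))
	return hex.EncodeToString(sum[:])
}

func main() {
	old := "8dfb96b5d09e6cf6f376d81f1e2770ee5ede309f9bd9e079688c9782649ab326"
	// Deterministic: every daemon that hits the same conflict computes the
	// same replacement ID, which is what TestPullConflict below relies on.
	fmt.Println(nextCandidateID(old))
}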

File diff suppressed because it is too large
+ 26 - 0
graph/pull_v2_test.go


+ 61 - 13
graph/push_v1.go

@@ -8,6 +8,7 @@ import (
 
 
 	"github.com/Sirupsen/logrus"
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/registry/client/transport"
 	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/progressreader"
 	"github.com/docker/docker/pkg/progressreader"
 	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/pkg/streamformatter"
@@ -127,7 +128,7 @@ func (s *TagStore) createImageIndex(images []string, tags map[string][]string) [
 			continue
 			continue
 		}
 		}
 		// If the image does not have a tag it still needs to be sent to the
 		// If the image does not have a tag it still needs to be sent to the
-		// registry with an empty tag so that it is accociated with the repository
+		// registry with an empty tag so that it is associated with the repository
 		imageIndex = append(imageIndex, &registry.ImgData{
 		imageIndex = append(imageIndex, &registry.ImgData{
 			ID:  id,
 			ID:  id,
 			Tag: "",
 			Tag: "",
@@ -137,8 +138,9 @@ func (s *TagStore) createImageIndex(images []string, tags map[string][]string) [
 }
 }
 
 
 type imagePushData struct {
 type imagePushData struct {
-	id       string
-	endpoint string
+	id              string
+	compatibilityID string
+	endpoint        string
 }
 }
 
 
 // lookupImageOnEndpoint checks the specified endpoint to see if an image exists
 // lookupImageOnEndpoint checks the specified endpoint to see if an image exists
@@ -146,7 +148,7 @@ type imagePushData struct {
 func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, images chan imagePushData, imagesToPush chan string) {
 func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, images chan imagePushData, imagesToPush chan string) {
 	defer wg.Done()
 	defer wg.Done()
 	for image := range images {
 	for image := range images {
-		if err := p.session.LookupRemoteImage(image.id, image.endpoint); err != nil {
+		if err := p.session.LookupRemoteImage(image.compatibilityID, image.endpoint); err != nil {
 			logrus.Errorf("Error in LookupRemoteImage: %s", err)
 			logrus.Errorf("Error in LookupRemoteImage: %s", err)
 			imagesToPush <- image.id
 			imagesToPush <- image.id
 			continue
 			continue
@@ -180,9 +182,14 @@ func (p *v1Pusher) pushImageToEndpoint(endpoint string, imageIDs []string, tags
 		pushes <- shouldPush
 		pushes <- shouldPush
 	}()
 	}()
 	for _, id := range imageIDs {
 	for _, id := range imageIDs {
+		compatibilityID, err := p.getV1ID(id)
+		if err != nil {
+			return err
+		}
 		imageData <- imagePushData{
 		imageData <- imagePushData{
-			id:       id,
-			endpoint: endpoint,
+			id:              id,
+			compatibilityID: compatibilityID,
+			endpoint:        endpoint,
 		}
 		}
 	}
 	}
 	// close the channel to notify the workers that there will be no more images to check.
 	// close the channel to notify the workers that there will be no more images to check.
@@ -202,7 +209,11 @@ func (p *v1Pusher) pushImageToEndpoint(endpoint string, imageIDs []string, tags
 		}
 		}
 		for _, tag := range tags[id] {
 		for _, tag := range tags[id] {
 			p.out.Write(p.sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(id), endpoint+"repositories/"+p.repoInfo.RemoteName+"/tags/"+tag))
 			p.out.Write(p.sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(id), endpoint+"repositories/"+p.repoInfo.RemoteName+"/tags/"+tag))
-			if err := p.session.PushRegistryTag(p.repoInfo.RemoteName, id, tag, endpoint); err != nil {
+			compatibilityID, err := p.getV1ID(id)
+			if err != nil {
+				return err
+			}
+			if err := p.session.PushRegistryTag(p.repoInfo.RemoteName, compatibilityID, tag, endpoint); err != nil {
 				return err
 				return err
 			}
 			}
 		}
 		}
@@ -224,6 +235,12 @@ func (p *v1Pusher) pushRepository(tag string) error {
 	logrus.Debugf("Preparing to push %s with the following images and tags", p.localRepo)
 	logrus.Debugf("Preparing to push %s with the following images and tags", p.localRepo)
 	for _, data := range imageIndex {
 	for _, data := range imageIndex {
 		logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
 		logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
+
+		// convert IDs to compatibilityIDs, imageIndex only used in registry calls
+		data.ID, err = p.getV1ID(data.ID)
+		if err != nil {
+			return err
+		}
 	}
 	}
 
 
 	if _, found := p.poolAdd("push", p.repoInfo.LocalName); found {
 	if _, found := p.poolAdd("push", p.repoInfo.LocalName); found {
@@ -253,20 +270,27 @@ func (p *v1Pusher) pushRepository(tag string) error {
 }
 }
 
 
 func (p *v1Pusher) pushImage(imgID, ep string) (checksum string, err error) {
 func (p *v1Pusher) pushImage(imgID, ep string) (checksum string, err error) {
-	jsonRaw, err := p.graph.RawJSON(imgID)
+	jsonRaw, err := p.getV1Config(imgID)
 	if err != nil {
 	if err != nil {
 		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
 		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
 	}
 	}
 	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pushing", nil))
 	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pushing", nil))
 
 
+	compatibilityID, err := p.getV1ID(imgID)
+	if err != nil {
+		return "", err
+	}
+
+	// General rule is to use ID for graph accesses and compatibilityID for
+	// calls to session.registry()
 	imgData := &registry.ImgData{
 	imgData := &registry.ImgData{
-		ID: imgID,
+		ID: compatibilityID,
 	}
 	}
 
 
 	// Send the json
 	// Send the json
 	if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
 	if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
 		if err == registry.ErrAlreadyExists {
 		if err == registry.ErrAlreadyExists {
-			p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
+			p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Image already pushed, skipping", nil))
 			return "", nil
 			return "", nil
 		}
 		}
 		return "", err
 		return "", err
@@ -279,7 +303,7 @@ func (p *v1Pusher) pushImage(imgID, ep string) (checksum string, err error) {
 	defer os.RemoveAll(layerData.Name())
 	defer os.RemoveAll(layerData.Name())
 
 
 	// Send the layer
 	// Send the layer
-	logrus.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
+	logrus.Debugf("rendered layer for %s of [%d] size", imgID, layerData.Size)
 
 
 	checksum, checksumPayload, err := p.session.PushImageLayerRegistry(imgData.ID,
 	checksum, checksumPayload, err := p.session.PushImageLayerRegistry(imgData.ID,
 		progressreader.New(progressreader.Config{
 		progressreader.New(progressreader.Config{
@@ -288,7 +312,7 @@ func (p *v1Pusher) pushImage(imgID, ep string) (checksum string, err error) {
 			Formatter: p.sf,
 			Formatter: p.sf,
 			Size:      layerData.Size,
 			Size:      layerData.Size,
 			NewLines:  false,
 			NewLines:  false,
-			ID:        stringid.TruncateID(imgData.ID),
+			ID:        stringid.TruncateID(imgID),
 			Action:    "Pushing",
 			Action:    "Pushing",
 		}), ep, jsonRaw)
 		}), ep, jsonRaw)
 	if err != nil {
 	if err != nil {
@@ -301,6 +325,30 @@ func (p *v1Pusher) pushImage(imgID, ep string) (checksum string, err error) {
 		return "", err
 		return "", err
 	}
 	}
 
 
-	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image successfully pushed", nil))
+	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Image successfully pushed", nil))
 	return imgData.Checksum, nil
 	return imgData.Checksum, nil
 }
 }
+
+// getV1ID returns the compatibilityID for the ID in the graph. compatibilityID
+// is read from the v1Compatibility config file on disk.
+func (p *v1Pusher) getV1ID(id string) (string, error) {
+	jsonData, err := p.getV1Config(id)
+	if err != nil {
+		return "", err
+	}
+	img, err := image.NewImgJSON(jsonData)
+	if err != nil {
+		return "", err
+	}
+	return img.ID, nil
+}
+
+// getV1Config returns the v1Compatibility config for the image in the graph. If
+// there is no v1Compatibility file on disk for the image, one is generated.
+func (p *v1Pusher) getV1Config(id string) ([]byte, error) {
+	jsonData, err := p.graph.GenerateV1CompatibilityChain(id)
+	if err != nil {
+		return nil, err
+	}
+	return jsonData, nil
+}
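
A small aside on the terminology used throughout these push changes: the "compatibilityID" is the legacy id field embedded in the v1Compatibility JSON, which getV1ID above recovers by parsing that JSON through image.NewImgJSON. The stripped-down equivalent below is hypothetical and illustrative only, using nothing but encoding/json.

package main

import (
	"encoding/json"
	"fmt"
)

// legacyIDFromV1Compatibility extracts the legacy v1 image ID: it is simply
// the "id" field of the v1Compatibility payload kept on disk for each layer.
func legacyIDFromV1Compatibility(v1JSON []byte) (string, error) {
	var v struct {
		ID string `json:"id"`
	}
	if err := json.Unmarshal(v1JSON, &v); err != nil {
		return "", err
	}
	return v.ID, nil
}

func main() {
	id, err := legacyIDFromV1Compatibility([]byte(`{"id":"3b38edc92eb7","parent":"ec3025ca8cc9"}`))
	fmt.Println(id, err)
}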

+ 8 - 7
graph/push_v2.go

@@ -138,13 +138,8 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
 			}
 			}
 		}
 		}
 
 
-		jsonData, err := p.graph.RawJSON(layer.ID)
-		if err != nil {
-			return fmt.Errorf("cannot retrieve the path for %s: %s", layer.ID, err)
-		}
-
 		var exists bool
 		var exists bool
-		dgst, err := p.graph.GetDigest(layer.ID)
+		dgst, err := p.graph.GetLayerDigest(layer.ID)
 		switch err {
 		switch err {
 		case nil:
 		case nil:
 			if p.layersPushed[dgst] {
 			if p.layersPushed[dgst] {
@@ -178,13 +173,19 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
 				return err
 				return err
 			} else if pushDigest != dgst {
 			} else if pushDigest != dgst {
 				// Cache new checksum
 				// Cache new checksum
-				if err := p.graph.SetDigest(layer.ID, pushDigest); err != nil {
+				if err := p.graph.SetLayerDigest(layer.ID, pushDigest); err != nil {
 					return err
 					return err
 				}
 				}
 				dgst = pushDigest
 				dgst = pushDigest
 			}
 			}
 		}
 		}
 
 
+		// read v1Compatibility config, generate new if needed
+		jsonData, err := p.graph.GenerateV1CompatibilityChain(layer.ID)
+		if err != nil {
+			return err
+		}
+
 		m.FSLayers = append(m.FSLayers, manifest.FSLayer{BlobSum: dgst})
 		m.FSLayers = append(m.FSLayers, manifest.FSLayer{BlobSum: dgst})
 		m.History = append(m.History, manifest.History{V1Compatibility: string(jsonData)})
 		m.History = append(m.History, manifest.History{V1Compatibility: string(jsonData)})
 
 

+ 3 - 2
graph/registry.go

@@ -102,8 +102,9 @@ func NewV2Repository(repoInfo *registry.RepositoryInfo, endpoint registry.APIEnd
 func digestFromManifest(m *manifest.SignedManifest, localName string) (digest.Digest, int, error) {
 func digestFromManifest(m *manifest.SignedManifest, localName string) (digest.Digest, int, error) {
 	payload, err := m.Payload()
 	payload, err := m.Payload()
 	if err != nil {
 	if err != nil {
-		logrus.Debugf("could not retrieve manifest payload: %v", err)
-		return "", 0, err
+		// If this failed, the signatures section was corrupted
+		// or missing. Treat the entire manifest as the payload.
+		payload = m.Raw
 	}
 	}
 	manifestDigest, err := digest.FromBytes(payload)
 	manifestDigest, err := digest.FromBytes(payload)
 	if err != nil {
 	if err != nil {

+ 0 - 5
graph/tags.go

@@ -20,7 +20,6 @@ import (
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/registry"
-	"github.com/docker/docker/trust"
 	"github.com/docker/docker/utils"
 	"github.com/docker/docker/utils"
 	"github.com/docker/libtrust"
 	"github.com/docker/libtrust"
 )
 )
@@ -41,7 +40,6 @@ type TagStore struct {
 	pushingPool     map[string]*broadcaster.Buffered
 	pushingPool     map[string]*broadcaster.Buffered
 	registryService *registry.Service
 	registryService *registry.Service
 	eventsService   *events.Events
 	eventsService   *events.Events
-	trustService    *trust.Store
 }
 }
 
 
 // Repository maps tags to image IDs.
 // Repository maps tags to image IDs.
@@ -77,8 +75,6 @@ type TagStoreConfig struct {
 	Registry *registry.Service
 	Registry *registry.Service
 	// Events is the events service to use for logging.
 	// Events is the events service to use for logging.
 	Events *events.Events
 	Events *events.Events
-	// Trust is the trust service to use for push and pull operations.
-	Trust *trust.Store
 }
 }
 
 
 // NewTagStore creates a new TagStore at specified path, using the parameters
 // NewTagStore creates a new TagStore at specified path, using the parameters
@@ -98,7 +94,6 @@ func NewTagStore(path string, cfg *TagStoreConfig) (*TagStore, error) {
 		pushingPool:     make(map[string]*broadcaster.Buffered),
 		pushingPool:     make(map[string]*broadcaster.Buffered),
 		registryService: cfg.Registry,
 		registryService: cfg.Registry,
 		eventsService:   cfg.Events,
 		eventsService:   cfg.Events,
-		trustService:    cfg.Trust,
 	}
 	}
 	// Load the json file if it exists, otherwise create it.
 	// Load the json file if it exists, otherwise create it.
 	if err := store.reload(); os.IsNotExist(err) {
 	if err := store.reload(); os.IsNotExist(err) {

+ 2 - 9
graph/tags_unit_test.go

@@ -13,7 +13,6 @@ import (
 	_ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests
 	_ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests
 	"github.com/docker/docker/graph/tags"
 	"github.com/docker/docker/graph/tags"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/image"
-	"github.com/docker/docker/trust"
 	"github.com/docker/docker/utils"
 	"github.com/docker/docker/utils"
 )
 )
 
 
@@ -63,15 +62,9 @@ func mkTestTagStore(root string, t *testing.T) *TagStore {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 
 
-	trust, err := trust.NewStore(root + "/trust")
-	if err != nil {
-		t.Fatal(err)
-	}
-
 	tagCfg := &TagStoreConfig{
 	tagCfg := &TagStoreConfig{
 		Graph:  graph,
 		Graph:  graph,
 		Events: events.New(),
 		Events: events.New(),
-		Trust:  trust,
 	}
 	}
 	store, err := NewTagStore(path.Join(root, "tags"), tagCfg)
 	store, err := NewTagStore(path.Join(root, "tags"), tagCfg)
 	if err != nil {
 	if err != nil {
@@ -82,7 +75,7 @@ func mkTestTagStore(root string, t *testing.T) *TagStore {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	img := &image.Image{ID: testOfficialImageID}
 	img := &image.Image{ID: testOfficialImageID}
-	if err := graph.Register(img, officialArchive); err != nil {
+	if err := graph.Register(v1Descriptor{img}, officialArchive); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	if err := store.Tag(testOfficialImageName, "", testOfficialImageID, false); err != nil {
 	if err := store.Tag(testOfficialImageName, "", testOfficialImageID, false); err != nil {
@@ -93,7 +86,7 @@ func mkTestTagStore(root string, t *testing.T) *TagStore {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	img = &image.Image{ID: testPrivateImageID}
 	img = &image.Image{ID: testPrivateImageID}
-	if err := graph.Register(img, privateArchive); err != nil {
+	if err := graph.Register(v1Descriptor{img}, privateArchive); err != nil {
 		t.Fatal(err)
 		t.Fatal(err)
 	}
 	}
 	if err := store.Tag(testPrivateImageName, "", testPrivateImageID, false); err != nil {
 	if err := store.Tag(testPrivateImageName, "", testPrivateImageID, false); err != nil {

+ 1 - 0
image/fixtures/post1.9/expected_computed_id

@@ -0,0 +1 @@
+sha256:f2722a8ec6926e02fa9f2674072cbc2a25cf0f449f27350f613cd843b02c9105

+ 1 - 0
image/fixtures/post1.9/expected_config

@@ -0,0 +1 @@
+{"architecture":"amd64","config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":null,"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"fb1f7270da9519308361b99dc8e0d30f12c24dfd28537c2337ece995ac853a16","container_config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":["/bin/sh","-c","#(nop) ADD file:11998b2a4d664a75cd0c3f4e4cb1837434e0f997ba157a0ac1d3c68a07aa2f4f in /"],"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-09-08T21:30:30.807853054Z","docker_version":"1.9.0-dev","layer_id":"sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a","os":"linux","parent_id":"sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02"}

+ 1 - 0
image/fixtures/post1.9/layer_id

@@ -0,0 +1 @@
+sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a

+ 1 - 0
image/fixtures/post1.9/parent_id

@@ -0,0 +1 @@
+sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02

+ 1 - 0
image/fixtures/post1.9/v1compatibility

@@ -0,0 +1 @@
+{"id":"8dfb96b5d09e6cf6f376d81f1e2770ee5ede309f9bd9e079688c9782649ab326","parent":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","created":"2015-09-08T21:30:30.807853054Z","container":"fb1f7270da9519308361b99dc8e0d30f12c24dfd28537c2337ece995ac853a16","container_config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":["/bin/sh","-c","#(nop) ADD file:11998b2a4d664a75cd0c3f4e4cb1837434e0f997ba157a0ac1d3c68a07aa2f4f in /"],"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"docker_version":"1.9.0-dev","config":{"Hostname":"fb1f7270da95","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["foo=bar"],"Cmd":null,"Image":"361a94d06b2b781b2f1ee6c72e1cbbfbbd032a103e26a3db75b431743829ae4f","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux"}

+ 1 - 0
image/fixtures/pre1.9/expected_computed_id

@@ -0,0 +1 @@
+sha256:fd6ebfedda8ea140a9380767e15bd32c6e899303cfe34bc4580c931f2f816f89

+ 1 - 0
image/fixtures/pre1.9/expected_config

@@ -0,0 +1 @@
+{"architecture":"amd64","config":{"Hostname":"03797203757d","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Cmd":null,"Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Volumes":null,"WorkingDir":"/go","Entrypoint":["/go/bin/dnsdock"],"OnBuild":[],"Labels":{}},"container":"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253","container_config":{"Hostname":"03797203757d","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Cmd":["/bin/sh","-c","#(nop) ENTRYPOINT [\"/go/bin/dnsdock\"]"],"Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Volumes":null,"WorkingDir":"/go","Entrypoint":["/go/bin/dnsdock"],"OnBuild":[],"Labels":{}},"created":"2015-08-19T16:49:11.368300679Z","docker_version":"1.6.2","layer_id":"sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a","os":"linux","parent_id":"sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02"}

+ 1 - 0
image/fixtures/pre1.9/layer_id

@@ -0,0 +1 @@
+sha256:31176893850e05d308cdbfef88877e460d50c8063883fb13eb5753097da6422a

+ 1 - 0
image/fixtures/pre1.9/parent_id

@@ -0,0 +1 @@
+sha256:ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02

+ 1 - 0
image/fixtures/pre1.9/v1compatibility

@@ -0,0 +1 @@
+{"id":"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9","parent":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","created":"2015-08-19T16:49:11.368300679Z","container":"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253","container_config":{"Hostname":"03797203757d","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Cmd":["/bin/sh","-c","#(nop) ENTRYPOINT [\"/go/bin/dnsdock\"]"],"Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Volumes":null,"WorkingDir":"/go","Entrypoint":["/go/bin/dnsdock"],"NetworkDisabled":false,"MacAddress":"","OnBuild":[],"Labels":{}},"docker_version":"1.6.2","config":{"Hostname":"03797203757d","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","GOLANG_VERSION=1.4.1","GOPATH=/go"],"Cmd":null,"Image":"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02","Volumes":null,"WorkingDir":"/go","Entrypoint":["/go/bin/dnsdock"],"NetworkDisabled":false,"MacAddress":"","OnBuild":[],"Labels":{}},"architecture":"amd64","os":"linux","Size":0}

+ 92 - 2
image/image.go

@@ -2,19 +2,38 @@ package image
 
 
 import (
 import (
 	"encoding/json"
 	"encoding/json"
+	"fmt"
 	"regexp"
 	"regexp"
 	"time"
 	"time"
 
 
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
 	derr "github.com/docker/docker/errors"
 	derr "github.com/docker/docker/errors"
+	"github.com/docker/docker/pkg/version"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/runconfig"
 )
 )
 
 
 var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
 var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
 
 
+// noFallbackMinVersion is the minimum version for which v1compatibility
+// information will not be marshaled through the Image struct to remove
+// blank fields.
+var noFallbackMinVersion = version.Version("1.8.3")
+
+// Descriptor provides the information necessary to register an image in
+// the graph.
+type Descriptor interface {
+	ID() string
+	Parent() string
+	MarshalConfig() ([]byte, error)
+}
+
 // Image stores the image configuration.
 // Image stores the image configuration.
+// All fields in this struct must be marked `omitempty` to keep getting
+// predictable hashes from the old `v1Compatibility` configuration.
 type Image struct {
 type Image struct {
 	// ID a unique 64 character identifier of the image
 	// ID a unique 64 character identifier of the image
-	ID string `json:"id"`
+	ID string `json:"id,omitempty"`
 	// Parent id of the image
 	// Parent id of the image
 	Parent string `json:"parent,omitempty"`
 	Parent string `json:"parent,omitempty"`
 	// Comment user added comment
 	// Comment user added comment
@@ -36,7 +55,11 @@ type Image struct {
 	// OS is the operating system used to build and run the image
 	// OS is the operating system used to build and run the image
 	OS string `json:"os,omitempty"`
 	OS string `json:"os,omitempty"`
 	// Size is the total size of the image including all layers it is composed of
 	// Size is the total size of the image including all layers it is composed of
-	Size int64
+	Size int64 `json:",omitempty"` // capitalized for backwards compatibility
+	// ParentID specifies the strong, content address of the parent configuration.
+	ParentID digest.Digest `json:"parent_id,omitempty"`
+	// LayerID provides the content address of the associated layer.
+	LayerID digest.Digest `json:"layer_id,omitempty"`
 }
 }
 
 
 // NewImgJSON creates an Image configuration from json.
 // NewImgJSON creates an Image configuration from json.
@@ -57,3 +80,70 @@ func ValidateID(id string) error {
 	}
 	}
 	return nil
 	return nil
 }
 }
+
+// MakeImageConfig returns immutable configuration JSON for image based on the
+// v1Compatibility object, layer digest and parent StrongID. SHA256() of this
+// config is the new image ID (strongID).
+func MakeImageConfig(v1Compatibility []byte, layerID, parentID digest.Digest) ([]byte, error) {
+
+	// Detect images created after 1.8.3
+	img, err := NewImgJSON(v1Compatibility)
+	if err != nil {
+		return nil, err
+	}
+	useFallback := version.Version(img.DockerVersion).LessThan(noFallbackMinVersion)
+
+	if useFallback {
+		// Fallback for pre-1.8.3. Calculate base config based on Image struct
+		// so that fields with default values added by Docker will use same ID
+		logrus.Debugf("Using fallback hash for %v", layerID)
+
+		v1Compatibility, err = json.Marshal(img)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var c map[string]*json.RawMessage
+	if err := json.Unmarshal(v1Compatibility, &c); err != nil {
+		return nil, err
+	}
+
+	if err := layerID.Validate(); err != nil {
+		return nil, fmt.Errorf("invalid layerID: %v", err)
+	}
+
+	c["layer_id"] = rawJSON(layerID)
+
+	if parentID != "" {
+		if err := parentID.Validate(); err != nil {
+			return nil, fmt.Errorf("invalid parentID %v", err)
+		}
+		c["parent_id"] = rawJSON(parentID)
+	}
+
+	delete(c, "id")
+	delete(c, "parent")
+	delete(c, "Size") // Size is calculated from data on disk and is inconsistent
+
+	return json.Marshal(c)
+}
+
+// StrongID returns image ID for the config JSON.
+func StrongID(configJSON []byte) (digest.Digest, error) {
+	digester := digest.Canonical.New()
+	if _, err := digester.Hash().Write(configJSON); err != nil {
+		return "", err
+	}
+	dgst := digester.Digest()
+	logrus.Debugf("H(%v) = %v", string(configJSON), dgst)
+	return dgst, nil
+}
+
+func rawJSON(value interface{}) *json.RawMessage {
+	jsonval, err := json.Marshal(value)
+	if err != nil {
+		return nil
+	}
+	return (*json.RawMessage)(&jsonval)
+}
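
To make the interplay of MakeImageConfig and StrongID concrete, the sketch below chains content-addressable IDs across a manifest's layers: starting from the base layer, each layer's canonical config embeds its layer digest and its parent's strong ID, and the hash of that config becomes the parent fed to the layer above. This is illustrative only; the function name is made up, and the real logic lives in getImageInfos in graph/pull_v2.go.

package example

import (
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/image"
)

// chainStrongIDs walks layers in manifest order (base layer last). For each
// layer it builds the canonical config with MakeImageConfig and hashes it
// with StrongID; the result becomes the parent of the layer above it.
func chainStrongIDs(v1Configs [][]byte, layerDigests []digest.Digest) ([]digest.Digest, error) {
	ids := make([]digest.Digest, len(v1Configs))
	var parent digest.Digest
	for i := len(v1Configs) - 1; i >= 0; i-- {
		config, err := image.MakeImageConfig(v1Configs[i], layerDigests[i], parent)
		if err != nil {
			return nil, err
		}
		id, err := image.StrongID(config)
		if err != nil {
			return nil, err
		}
		ids[i] = id
		parent = id
	}
	return ids, nil
}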

+ 55 - 0
image/image_test.go

@@ -0,0 +1,55 @@
+package image
+
+import (
+	"bytes"
+	"io/ioutil"
+	"testing"
+
+	"github.com/docker/distribution/digest"
+)
+
+var fixtures = []string{
+	"fixtures/pre1.9",
+	"fixtures/post1.9",
+}
+
+func loadFixtureFile(t *testing.T, path string) []byte {
+	fileData, err := ioutil.ReadFile(path)
+	if err != nil {
+		t.Fatalf("error opening %s: %v", path, err)
+	}
+
+	return bytes.TrimSpace(fileData)
+}
+
+// TestMakeImageConfig makes sure that MakeImageConfig returns the expected
+// canonical JSON for a reference Image.
+func TestMakeImageConfig(t *testing.T) {
+	for _, fixture := range fixtures {
+		v1Compatibility := loadFixtureFile(t, fixture+"/v1compatibility")
+		expectedConfig := loadFixtureFile(t, fixture+"/expected_config")
+		layerID := digest.Digest(loadFixtureFile(t, fixture+"/layer_id"))
+		parentID := digest.Digest(loadFixtureFile(t, fixture+"/parent_id"))
+
+		json, err := MakeImageConfig(v1Compatibility, layerID, parentID)
+		if err != nil {
+			t.Fatalf("MakeImageConfig on %s returned error: %v", fixture, err)
+		}
+		if !bytes.Equal(json, expectedConfig) {
+			t.Fatalf("did not get expected JSON for %s\nexpected: %s\ngot: %s", fixture, expectedConfig, json)
+		}
+	}
+}
+
+// TestGetStrongID makes sure that StrongID returns the expected
+// hash for a reference Image.
+func TestGetStrongID(t *testing.T) {
+	for _, fixture := range fixtures {
+		expectedConfig := loadFixtureFile(t, fixture+"/expected_config")
+		expectedComputedID := digest.Digest(loadFixtureFile(t, fixture+"/expected_computed_id"))
+
+		if id, err := StrongID(expectedConfig); err != nil || id != expectedComputedID {
+			t.Fatalf("did not get expected ID for %s\nexpected: %s\ngot: %s\nerror: %v", fixture, expectedComputedID, id, err)
+		}
+	}
+}

+ 255 - 0
integration-cli/docker_cli_pull_test.go

@@ -1,7 +1,11 @@
 package main
 package main
 
 
 import (
 import (
+	"encoding/json"
 	"fmt"
 	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
 	"regexp"
 	"regexp"
 	"strings"
 	"strings"
 	"time"
 	"time"
@@ -159,3 +163,254 @@ func (s *DockerHubPullSuite) TestPullClientDisconnect(c *check.C) {
 		time.Sleep(500 * time.Millisecond)
 		time.Sleep(500 * time.Millisecond)
 	}
 	}
 }
 }
+
+type idAndParent struct {
+	ID     string
+	Parent string
+}
+
+func inspectImage(c *check.C, imageRef string) idAndParent {
+	out, _ := dockerCmd(c, "inspect", imageRef)
+	var inspectOutput []idAndParent
+	err := json.Unmarshal([]byte(out), &inspectOutput)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	return inspectOutput[0]
+}
+
+func imageID(c *check.C, imageRef string) string {
+	return inspectImage(c, imageRef).ID
+}
+
+func imageParent(c *check.C, imageRef string) string {
+	return inspectImage(c, imageRef).Parent
+}
+
+// TestPullMigration verifies that pulling an image based on layers
+// that already exist locally will reuse those existing layers.
+func (s *DockerRegistrySuite) TestPullMigration(c *check.C) {
+	repoName := privateRegistryURL + "/dockercli/migration"
+
+	baseImage := repoName + ":base"
+	_, err := buildImage(baseImage, fmt.Sprintf(`
+	    FROM scratch
+	    ENV IMAGE base
+	    CMD echo %s
+	`, baseImage), true)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	baseIDBeforePush := imageID(c, baseImage)
+	baseParentBeforePush := imageParent(c, baseImage)
+
+	derivedImage := repoName + ":derived"
+	_, err = buildImage(derivedImage, fmt.Sprintf(`
+	    FROM %s
+	    CMD echo %s
+	`, baseImage, derivedImage), true)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	derivedIDBeforePush := imageID(c, derivedImage)
+
+	dockerCmd(c, "push", derivedImage)
+
+	// Remove derived image from the local store
+	dockerCmd(c, "rmi", derivedImage)
+
+	// Repull
+	dockerCmd(c, "pull", derivedImage)
+
+	// Check that the parent of this pulled image is the original base
+	// image
+	derivedIDAfterPull1 := imageID(c, derivedImage)
+	derivedParentAfterPull1 := imageParent(c, derivedImage)
+
+	if derivedIDAfterPull1 == derivedIDBeforePush {
+		c.Fatal("image's ID should have changed after deleting and pulling")
+	}
+
+	if derivedParentAfterPull1 != baseIDBeforePush {
+		c.Fatalf("pulled image's parent ID (%s) does not match base image's ID (%s)", derivedParentAfterPull1, baseIDBeforePush)
+	}
+
+	// Confirm that repushing and repulling does not change the computed ID
+	dockerCmd(c, "push", derivedImage)
+	dockerCmd(c, "rmi", derivedImage)
+	dockerCmd(c, "pull", derivedImage)
+
+	derivedIDAfterPull2 := imageID(c, derivedImage)
+	derivedParentAfterPull2 := imageParent(c, derivedImage)
+
+	if derivedIDAfterPull2 != derivedIDAfterPull1 {
+		c.Fatal("image's ID unexpectedly changed after a repush/repull")
+	}
+
+	if derivedParentAfterPull2 != baseIDBeforePush {
+		c.Fatalf("pulled image's parent ID (%s) does not match base image's ID (%s)", derivedParentAfterPull2, baseIDBeforePush)
+	}
+
+	// Remove everything, repull, and make sure everything uses computed IDs
+	dockerCmd(c, "rmi", baseImage, derivedImage)
+	dockerCmd(c, "pull", derivedImage)
+
+	derivedIDAfterPull3 := imageID(c, derivedImage)
+	derivedParentAfterPull3 := imageParent(c, derivedImage)
+	derivedGrandparentAfterPull3 := imageParent(c, derivedParentAfterPull3)
+
+	if derivedIDAfterPull3 != derivedIDAfterPull1 {
+		c.Fatal("image's ID unexpectedly changed after a second repull")
+	}
+
+	if derivedParentAfterPull3 == baseIDBeforePush {
+		c.Fatalf("pulled image's parent ID (%s) should not match base image's original ID (%s)", derivedParentAfterPull3, baseIDBeforePush)
+	}
+
+	if derivedGrandparentAfterPull3 == baseParentBeforePush {
+		c.Fatal("base image's parent ID should have been rewritten on pull")
+	}
+}
+
+// TestPullMigrationRun verifies that pulling an image based on layers
+// that already exist locally will result in an image that runs properly.
+func (s *DockerRegistrySuite) TestPullMigrationRun(c *check.C) {
+	type idAndParent struct {
+		ID     string
+		Parent string
+	}
+
+	derivedImage := privateRegistryURL + "/dockercli/migration-run"
+	baseImage := "busybox"
+
+	_, err := buildImage(derivedImage, fmt.Sprintf(`
+	    FROM %s
+	    RUN dd if=/dev/zero of=/file bs=1024 count=1024
+	    CMD echo %s
+	`, baseImage, derivedImage), true)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	baseIDBeforePush := imageID(c, baseImage)
+	derivedIDBeforePush := imageID(c, derivedImage)
+
+	dockerCmd(c, "push", derivedImage)
+
+	// Remove derived image from the local store
+	dockerCmd(c, "rmi", derivedImage)
+
+	// Repull
+	dockerCmd(c, "pull", derivedImage)
+
+	// Check that this pulled image is based on the original base image
+	derivedIDAfterPull1 := imageID(c, derivedImage)
+	derivedParentAfterPull1 := imageParent(c, imageParent(c, derivedImage))
+
+	if derivedIDAfterPull1 == derivedIDBeforePush {
+		c.Fatal("image's ID should have changed after deleting and pulling")
+	}
+
+	if derivedParentAfterPull1 != baseIDBeforePush {
+		c.Fatalf("pulled image's parent ID (%s) does not match base image's ID (%s)", derivedParentAfterPull1, baseIDBeforePush)
+	}
+
+	// Make sure the image runs correctly
+	out, _ := dockerCmd(c, "run", "--rm", derivedImage)
+	if strings.TrimSpace(out) != derivedImage {
+		c.Fatalf("expected %s; got %s", derivedImage, out)
+	}
+
+	// Confirm that repushing and repulling does not change the computed ID
+	dockerCmd(c, "push", derivedImage)
+	dockerCmd(c, "rmi", derivedImage)
+	dockerCmd(c, "pull", derivedImage)
+
+	derivedIDAfterPull2 := imageID(c, derivedImage)
+	derivedParentAfterPull2 := imageParent(c, imageParent(c, derivedImage))
+
+	if derivedIDAfterPull2 != derivedIDAfterPull1 {
+		c.Fatal("image's ID unexpectedly changed after a repush/repull")
+	}
+
+	if derivedParentAfterPull2 != baseIDBeforePush {
+		c.Fatalf("pulled image's parent ID (%s) does not match base image's ID (%s)", derivedParentAfterPull2, baseIDBeforePush)
+	}
+
+	// Make sure the image still runs
+	out, _ = dockerCmd(c, "run", "--rm", derivedImage)
+	if strings.TrimSpace(out) != derivedImage {
+		c.Fatalf("expected %s; got %s", derivedImage, out)
+	}
+}
+
+// TestPullConflict provides coverage of the situation where a computed
+// strongID conflicts with some unverifiable data in the graph.
+func (s *DockerRegistrySuite) TestPullConflict(c *check.C) {
+	repoName := privateRegistryURL + "/dockercli/conflict"
+
+	_, err := buildImage(repoName, `
+	    FROM scratch
+	    ENV IMAGE conflict
+	    CMD echo conflict
+	`, true)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	dockerCmd(c, "push", repoName)
+
+	// Pull to make it content-addressable
+	dockerCmd(c, "rmi", repoName)
+	dockerCmd(c, "pull", repoName)
+
+	IDBeforeLoad := imageID(c, repoName)
+
+	// Load/save to turn this into an unverified image with the same ID
+	tmpDir, err := ioutil.TempDir("", "conflict-save-output")
+	if err != nil {
+		c.Errorf("failed to create temporary directory: %s", err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	tarFile := filepath.Join(tmpDir, "repo.tar")
+
+	dockerCmd(c, "save", "-o", tarFile, repoName)
+	dockerCmd(c, "rmi", repoName)
+	dockerCmd(c, "load", "-i", tarFile)
+
+	// Check that the the ID is the same after save/load.
+	IDAfterLoad := imageID(c, repoName)
+
+	if IDAfterLoad != IDBeforeLoad {
+		c.Fatal("image's ID should be the same after save/load")
+	}
+
+	// Repull
+	dockerCmd(c, "pull", repoName)
+
+	// Check that the ID is now different because of the conflict.
+	IDAfterPull1 := imageID(c, repoName)
+
+	// Expect the new ID to be SHA256(oldID)
+	expectedIDDigest, err := digest.FromBytes([]byte(IDBeforeLoad))
+	if err != nil {
+		c.Fatalf("digest error: %v", err)
+	}
+	expectedID := expectedIDDigest.Hex()
+	if IDAfterPull1 != expectedID {
+		c.Fatalf("image's ID should have changed on pull to %s (got %s)", expectedID, IDAfterPull1)
+	}
+
+	// A second pull should use the new ID again.
+	dockerCmd(c, "pull", repoName)
+
+	IDAfterPull2 := imageID(c, repoName)
+
+	if IDAfterPull2 != IDAfterPull1 {
+		c.Fatal("image's ID unexpectedly changed after a repull")
+	}
+}

+ 7 - 4
runconfig/config.go

@@ -12,6 +12,8 @@ import (
 // It should hold only portable information about the container.
 // It should hold only portable information about the container.
 // Here, "portable" means "independent from the host we are running on".
 // Here, "portable" means "independent from the host we are running on".
 // Non-portable information *should* appear in HostConfig.
 // Non-portable information *should* appear in HostConfig.
+// All fields added to this struct must be marked `omitempty` to keep getting
+// predictable hashes from the old `v1Compatibility` configuration.
 type Config struct {
 type Config struct {
 	Hostname        string                // Hostname
 	Hostname        string                // Hostname
 	Domainname      string                // Domainname
 	Domainname      string                // Domainname
@@ -19,7 +21,8 @@ type Config struct {
 	AttachStdin     bool                  // Attach the standard input, makes possible user interaction
 	AttachStdin     bool                  // Attach the standard input, makes possible user interaction
 	AttachStdout    bool                  // Attach the standard output
 	AttachStdout    bool                  // Attach the standard output
 	AttachStderr    bool                  // Attach the standard error
 	AttachStderr    bool                  // Attach the standard error
-	ExposedPorts    map[nat.Port]struct{} // List of exposed ports
+	ExposedPorts    map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports
+	PublishService  string                `json:",omitempty"` // Name of the network service exposed by the container
 	Tty             bool                  // Attach standard streams to a tty, including stdin if it is not closed.
 	Tty             bool                  // Attach standard streams to a tty, including stdin if it is not closed.
 	OpenStdin       bool                  // Open stdin
 	OpenStdin       bool                  // Open stdin
 	StdinOnce       bool                  // If true, close stdin after the 1 attached client disconnects.
 	StdinOnce       bool                  // If true, close stdin after the 1 attached client disconnects.
@@ -29,11 +32,11 @@ type Config struct {
 	Volumes         map[string]struct{}   // List of volumes (mounts) used for the container
 	Volumes         map[string]struct{}   // List of volumes (mounts) used for the container
 	WorkingDir      string                // Current directory (PWD) in the command will be launched
 	WorkingDir      string                // Current directory (PWD) in the command will be launched
 	Entrypoint      *stringutils.StrSlice // Entrypoint to run when starting the container
 	Entrypoint      *stringutils.StrSlice // Entrypoint to run when starting the container
-	NetworkDisabled bool                  // Is network disabled
-	MacAddress      string                // Mac Address of the container
+	NetworkDisabled bool                  `json:",omitempty"` // Is network disabled
+	MacAddress      string                `json:",omitempty"` // Mac Address of the container
 	OnBuild         []string              // ONBUILD metadata that were defined on the image Dockerfile
 	OnBuild         []string              // ONBUILD metadata that were defined on the image Dockerfile
 	Labels          map[string]string     // List of labels set to this container
 	Labels          map[string]string     // List of labels set to this container
-	StopSignal      string                // Signal to stop a container
+	StopSignal      string                `json:",omitempty"` // Signal to stop a container
 }
 }
 
 
 // DecodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper
 // DecodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper
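
The omitempty requirement added at the top of this file matters because the strong image ID is the SHA-256 of the marshaled config: any new field that serializes even when zero-valued changes the JSON, and therefore silently changes the computed ID of every image built before the field existed. A toy, self-contained sketch of the effect (the types here are hypothetical):

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// Two toy configs that differ only in whether the new field is omitempty.
type withOmitempty struct {
	Cmd        []string
	StopSignal string `json:",omitempty"`
}

type withoutOmitempty struct {
	Cmd        []string
	StopSignal string
}

func main() {
	a, _ := json.Marshal(withOmitempty{Cmd: []string{"sh"}})
	b, _ := json.Marshal(withoutOmitempty{Cmd: []string{"sh"}})
	fmt.Printf("%s\n%x\n", a, sha256.Sum256(a)) // {"Cmd":["sh"]}
	fmt.Printf("%s\n%x\n", b, sha256.Sum256(b)) // {"Cmd":["sh"],"StopSignal":""}
	// The empty StopSignal changes the JSON, and hence the hash that would
	// become the image ID for configs that never contained the field.
}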

+ 0 - 59
trust/service.go

@@ -1,59 +0,0 @@
-package trust
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/libtrust"
-)
-
-// NotVerifiedError reports a error when doing the key check.
-// For example if the graph is not verified or the key has expired.
-type NotVerifiedError string
-
-func (e NotVerifiedError) Error() string {
-	return string(e)
-}
-
-// CheckKey verifies that the given public key is allowed to perform
-// the given action on the given node according to the trust graph.
-func (t *Store) CheckKey(ns string, key []byte, perm uint16) (bool, error) {
-	if len(key) == 0 {
-		return false, fmt.Errorf("Missing PublicKey")
-	}
-	pk, err := libtrust.UnmarshalPublicKeyJWK(key)
-	if err != nil {
-		return false, fmt.Errorf("Error unmarshalling public key: %v", err)
-	}
-
-	if perm == 0 {
-		perm = 0x03
-	}
-
-	t.RLock()
-	defer t.RUnlock()
-	if t.graph == nil {
-		return false, NotVerifiedError("no graph")
-	}
-
-	// Check if any expired grants
-	verified, err := t.graph.Verify(pk, ns, perm)
-	if err != nil {
-		return false, fmt.Errorf("Error verifying key to namespace: %s", ns)
-	}
-	if !verified {
-		logrus.Debugf("Verification failed for %s using key %s", ns, pk.KeyID())
-		return false, NotVerifiedError("not verified")
-	}
-	if t.expiration.Before(time.Now()) {
-		return false, NotVerifiedError("expired")
-	}
-	return true, nil
-}
-
-// UpdateBase retrieves updated base graphs. This function cannot error, it
-// should only log errors.
-func (t *Store) UpdateBase() {
-	t.fetch()
-}

+ 0 - 201
trust/trusts.go

@@ -1,201 +0,0 @@
-package trust
-
-import (
-	"crypto/x509"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"os"
-	"path"
-	"path/filepath"
-	"sync"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/libtrust/trustgraph"
-)
-
-// Store defines a TrustStore : stores trusted certificates and permissions
-// which are used to verify the signature keys on manifests.
-// Note: This is being deprecated by the notary work.
-type Store struct {
-	path          string
-	caPool        *x509.CertPool
-	graph         trustgraph.TrustGraph
-	expiration    time.Time
-	fetcher       *time.Timer
-	fetchTime     time.Duration
-	autofetch     bool
-	httpClient    *http.Client
-	baseEndpoints map[string]*url.URL
-
-	sync.RWMutex
-}
-
-// defaultFetchtime represents the starting duration to wait between
-// fetching sections of the graph.  Unsuccessful fetches should
-// increase time between fetching.
-const defaultFetchtime = 45 * time.Second
-
-var baseEndpoints = map[string]string{"official": "https://dvjy3tqbc323p.cloudfront.net/trust/official.json"}
-
-// NewStore creates a TrustStore from a given path, if the path is not
-// relative, it will be joined with the working directory.
-func NewStore(path string) (*Store, error) {
-	abspath, err := filepath.Abs(path)
-	if err != nil {
-		return nil, err
-	}
-
-	// Create base graph url map
-	endpoints := map[string]*url.URL{}
-	for name, endpoint := range baseEndpoints {
-		u, err := url.Parse(endpoint)
-		if err != nil {
-			return nil, err
-		}
-		endpoints[name] = u
-	}
-
-	// Load grant files
-	t := &Store{
-		path:          abspath,
-		caPool:        nil,
-		httpClient:    &http.Client{},
-		fetchTime:     time.Millisecond,
-		baseEndpoints: endpoints,
-	}
-
-	if err := t.reload(); err != nil {
-		return nil, err
-	}
-
-	return t, nil
-}
-
-func (t *Store) reload() error {
-	t.Lock()
-	defer t.Unlock()
-
-	matches, err := filepath.Glob(filepath.Join(t.path, "*.json"))
-	if err != nil {
-		return err
-	}
-	statements := make([]*trustgraph.Statement, len(matches))
-	for i, match := range matches {
-		f, err := os.Open(match)
-		if err != nil {
-			return fmt.Errorf("Error opening %q: %s", match, err)
-		}
-		statements[i], err = trustgraph.LoadStatement(f, nil)
-		if err != nil {
-			f.Close()
-			return fmt.Errorf("Error loading %q: %s", match, err)
-		}
-		f.Close()
-	}
-	if len(statements) == 0 {
-		if t.autofetch {
-			logrus.Debugf("No grants, fetching")
-			t.fetcher = time.AfterFunc(t.fetchTime, t.fetch)
-		}
-		return nil
-	}
-
-	grants, expiration, err := trustgraph.CollapseStatements(statements, true)
-	if err != nil {
-		return err
-	}
-
-	t.expiration = expiration
-	t.graph = trustgraph.NewMemoryGraph(grants)
-	logrus.Debugf("Reloaded graph with %d grants expiring at %s", len(grants), expiration)
-
-	if t.autofetch {
-		nextFetch := expiration.Sub(time.Now())
-		if nextFetch < 0 {
-			nextFetch = defaultFetchtime
-		} else {
-			nextFetch = time.Duration(0.8 * (float64)(nextFetch))
-		}
-		t.fetcher = time.AfterFunc(nextFetch, t.fetch)
-	}
-
-	return nil
-}
-
-func (t *Store) fetchBaseGraph(u *url.URL) (*trustgraph.Statement, error) {
-	req := &http.Request{
-		Method:     "GET",
-		URL:        u,
-		Proto:      "HTTP/1.1",
-		ProtoMajor: 1,
-		ProtoMinor: 1,
-		Header:     make(http.Header),
-		Body:       nil,
-		Host:       u.Host,
-	}
-
-	resp, err := t.httpClient.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	if resp.StatusCode == 404 {
-		return nil, errors.New("base graph does not exist")
-	}
-
-	defer resp.Body.Close()
-
-	return trustgraph.LoadStatement(resp.Body, t.caPool)
-}
-
-// fetch retrieves updated base graphs. This function cannot error, it
-// should only log errors
-func (t *Store) fetch() {
-	t.Lock()
-	defer t.Unlock()
-
-	if t.autofetch && t.fetcher == nil {
-		// Autofetch is enabled but no fetch is currently scheduled; nothing to do.
-		return
-	}
-
-	fetchCount := 0
-	for bg, ep := range t.baseEndpoints {
-		statement, err := t.fetchBaseGraph(ep)
-		if err != nil {
-			logrus.Infof("Trust graph fetch failed: %s", err)
-			continue
-		}
-		b, err := statement.Bytes()
-		if err != nil {
-			logrus.Infof("Bad trust graph statement: %s", err)
-			continue
-		}
-		// TODO check if value differs
-		if err := ioutil.WriteFile(path.Join(t.path, bg+".json"), b, 0600); err != nil {
-			logrus.Infof("Error writing trust graph statement: %s", err)
-		}
-		fetchCount++
-	}
-	logrus.Debugf("Fetched %d base graphs at %s", fetchCount, time.Now())
-
-	if fetchCount > 0 {
-		go func() {
-			if err := t.reload(); err != nil {
-				logrus.Infof("Reload of trust graph failed: %s", err)
-			}
-		}()
-		t.fetchTime = defaultFetchtime
-		t.fetcher = nil
-	} else if t.autofetch {
-		maxTime := 10 * defaultFetchtime
-		t.fetchTime = time.Duration(1.5 * (float64)(t.fetchTime+time.Second))
-		if t.fetchTime > maxTime {
-			t.fetchTime = maxTime
-		}
-		t.fetcher = time.AfterFunc(t.fetchTime, t.fetch)
-	}
-}
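For context on the removed store above: after an unsuccessful fetch round, Store.fetch re-arms its timer with fetchTime = 1.5 * (fetchTime + 1s), capped at ten times defaultFetchtime, starting from an initial one-millisecond delay. The standalone Go sketch below reproduces that backoff schedule; it is not part of the deleted package, and nextFetchTime is a hypothetical helper introduced only for illustration.

package main

import (
	"fmt"
	"time"
)

// defaultFetchtime mirrors the constant in the removed trusts.go.
const defaultFetchtime = 45 * time.Second

// nextFetchTime applies the same backoff step Store.fetch uses after an
// unsuccessful round: grow by 1.5x (plus one second) and cap at ten times
// the default fetch time.
func nextFetchTime(current time.Duration) time.Duration {
	maxTime := 10 * defaultFetchtime
	next := time.Duration(1.5 * float64(current+time.Second))
	if next > maxTime {
		next = maxTime
	}
	return next
}

func main() {
	wait := time.Millisecond // the store's initial, near-immediate fetch delay
	for i := 1; i <= 12; i++ {
		wait = nextFetchTime(wait)
		fmt.Printf("failed round %2d: next fetch in %v\n", i, wait)
	}
}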

+ 0 - 50
vendor/src/github.com/docker/libtrust/trustgraph/graph.go

@@ -1,50 +0,0 @@
-package trustgraph
-
-import "github.com/docker/libtrust"
-
-// TrustGraph represents a graph of authorization mapping
-// public keys to nodes and grants between nodes.
-type TrustGraph interface {
-	// Verifies that the given public key is allowed to perform
-	// the given action on the given node according to the trust
-	// graph.
-	Verify(libtrust.PublicKey, string, uint16) (bool, error)
-
-	// GetGrants returns an array of all grant chains which are used to
-	// allow the requested permission.
-	GetGrants(libtrust.PublicKey, string, uint16) ([][]*Grant, error)
-}
-
-// Grant represents a transfer of permission from one part of the
-// trust graph to another. This is the only way to delegate
-// permission between two different subtrees in the graph.
-type Grant struct {
-	// Subject is the namespace being granted
-	Subject string
-
-	// Permission is a bit map of permissions
-	Permission uint16
-
-	// Grantee represents the node being granted a permission scope.
-	// The grantee can be either a namespace item or a key ID;
-	// namespace items always start with a '/'.
-	Grantee string
-
-	// statement represents the statement used to create
-	// this object.
-	statement *Statement
-}
-
-// Permissions
-//  Read node 0x01 (can read node, no sub nodes)
-//  Write node 0x02 (can write to node object, cannot create subnodes)
-//  Read subtree 0x04 (delegates read to each sub node)
-//  Write subtree 0x08 (delegates write to each sub node, included create on the subject)
-//
-// Permission shortcuts
-// ReadItem = 0x01
-// WriteItem = 0x03
-// ReadAccess = 0x07
-// WriteAccess = 0x0F
-// Delegate = 0x0F
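The bit map above is the whole authorization model of the deleted trustgraph package: a grant satisfies a request when every requested bit is present, i.e. granted&requested == requested (the test used in memory_graph.go). A minimal, self-contained sketch, with constant names taken from the comment and a hypothetical hasPermission helper:

package main

import "fmt"

// Permission bits, named after the shortcuts documented in graph.go.
const (
	readNode     uint16 = 0x01 // read the node, no sub nodes
	writeNode    uint16 = 0x02 // write the node object, cannot create subnodes
	readSubtree  uint16 = 0x04 // delegates read to each sub node
	writeSubtree uint16 = 0x08 // delegates write to each sub node

	readAccess  = readNode | writeNode | readSubtree                // 0x07
	writeAccess = readNode | writeNode | readSubtree | writeSubtree // 0x0F
)

// hasPermission shows the mask test the grant walk performs: every
// requested bit must be present in the granted permission.
func hasPermission(granted, requested uint16) bool {
	return granted&requested == requested
}

func main() {
	fmt.Println(hasPermission(readAccess, readNode))     // true: 0x07 contains 0x01
	fmt.Println(hasPermission(readAccess, writeSubtree)) // false: 0x07 lacks 0x08
	fmt.Println(hasPermission(writeAccess, readAccess))  // true: 0x0F contains 0x07
}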

+ 0 - 133
vendor/src/github.com/docker/libtrust/trustgraph/memory_graph.go

@@ -1,133 +0,0 @@
-package trustgraph
-
-import (
-	"strings"
-
-	"github.com/docker/libtrust"
-)
-
-type grantNode struct {
-	grants   []*Grant
-	children map[string]*grantNode
-}
-
-type memoryGraph struct {
-	roots map[string]*grantNode
-}
-
-func newGrantNode() *grantNode {
-	return &grantNode{
-		grants:   []*Grant{},
-		children: map[string]*grantNode{},
-	}
-}
-
-// NewMemoryGraph returns a new in-memory trust graph created from
-// a static list of grants. The graph is immutable after creation;
-// any alteration requires building a new instance.
-func NewMemoryGraph(grants []*Grant) TrustGraph {
-	roots := map[string]*grantNode{}
-	for _, grant := range grants {
-		parts := strings.Split(grant.Grantee, "/")
-		nodes := roots
-		var node *grantNode
-		var nodeOk bool
-		for _, part := range parts {
-			node, nodeOk = nodes[part]
-			if !nodeOk {
-				node = newGrantNode()
-				nodes[part] = node
-			}
-			if part != "" {
-				node.grants = append(node.grants, grant)
-			}
-			nodes = node.children
-		}
-	}
-	return &memoryGraph{roots}
-}
-
-func (g *memoryGraph) getGrants(name string) []*Grant {
-	nameParts := strings.Split(name, "/")
-	nodes := g.roots
-	var node *grantNode
-	var nodeOk bool
-	for _, part := range nameParts {
-		node, nodeOk = nodes[part]
-		if !nodeOk {
-			return nil
-		}
-		nodes = node.children
-	}
-	return node.grants
-}
-
-func isSubName(name, sub string) bool {
-	if strings.HasPrefix(name, sub) {
-		if len(name) == len(sub) || name[len(sub)] == '/' {
-			return true
-		}
-	}
-	return false
-}
-
-type walkFunc func(*Grant, []*Grant) bool
-
-func foundWalkFunc(*Grant, []*Grant) bool {
-	return true
-}
-
-func (g *memoryGraph) walkGrants(start, target string, permission uint16, f walkFunc, chain []*Grant, visited map[*Grant]bool, collect bool) bool {
-	if visited == nil {
-		visited = map[*Grant]bool{}
-	}
-	grants := g.getGrants(start)
-	subGrants := make([]*Grant, 0, len(grants))
-	for _, grant := range grants {
-		if visited[grant] {
-			continue
-		}
-		visited[grant] = true
-		if grant.Permission&permission == permission {
-			if isSubName(target, grant.Subject) {
-				if f(grant, chain) {
-					return true
-				}
-			} else {
-				subGrants = append(subGrants, grant)
-			}
-		}
-	}
-	for _, grant := range subGrants {
-		var chainCopy []*Grant
-		if collect {
-			chainCopy = make([]*Grant, len(chain)+1)
-			copy(chainCopy, chain)
-			chainCopy[len(chainCopy)-1] = grant
-		} else {
-			chainCopy = nil
-		}
-
-		if g.walkGrants(grant.Subject, target, permission, f, chainCopy, visited, collect) {
-			return true
-		}
-	}
-	return false
-}
-
-func (g *memoryGraph) Verify(key libtrust.PublicKey, node string, permission uint16) (bool, error) {
-	return g.walkGrants(key.KeyID(), node, permission, foundWalkFunc, nil, nil, false), nil
-}
-
-func (g *memoryGraph) GetGrants(key libtrust.PublicKey, node string, permission uint16) ([][]*Grant, error) {
-	grants := [][]*Grant{}
-	collect := func(grant *Grant, chain []*Grant) bool {
-		grantChain := make([]*Grant, len(chain)+1)
-		copy(grantChain, chain)
-		grantChain[len(grantChain)-1] = grant
-		grants = append(grants, grantChain)
-		return false
-	}
-	g.walkGrants(key.KeyID(), node, permission, collect, nil, nil, true)
-	return grants, nil
-}
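The scoping rule that keeps a grant from leaking across path boundaries is isSubName above. The small program below copies that function verbatim from the deleted file and exercises the three interesting cases: an exact match, a '/' boundary, and a plain string prefix that must not match.

package main

import (
	"fmt"
	"strings"
)

// isSubName is copied verbatim from the deleted memory_graph.go: it reports
// whether name equals sub or sits underneath it at a '/' boundary, which is
// what keeps a grant on "/docker" from matching "/dockerfiles".
func isSubName(name, sub string) bool {
	if strings.HasPrefix(name, sub) {
		if len(name) == len(sub) || name[len(sub)] == '/' {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isSubName("/docker/official", "/docker")) // true: '/' boundary
	fmt.Println(isSubName("/docker", "/docker"))          // true: exact match
	fmt.Println(isSubName("/dockerfiles", "/docker"))     // false: not a path boundary
}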

+ 0 - 227
vendor/src/github.com/docker/libtrust/trustgraph/statement.go

@@ -1,227 +0,0 @@
-package trustgraph
-
-import (
-	"crypto/x509"
-	"encoding/json"
-	"io"
-	"io/ioutil"
-	"sort"
-	"strings"
-	"time"
-
-	"github.com/docker/libtrust"
-)
-
-type jsonGrant struct {
-	Subject    string `json:"subject"`
-	Permission uint16 `json:"permission"`
-	Grantee    string `json:"grantee"`
-}
-
-type jsonRevocation struct {
-	Subject    string `json:"subject"`
-	Revocation uint16 `json:"revocation"`
-	Grantee    string `json:"grantee"`
-}
-
-type jsonStatement struct {
-	Revocations []*jsonRevocation `json:"revocations"`
-	Grants      []*jsonGrant      `json:"grants"`
-	Expiration  time.Time         `json:"expiration"`
-	IssuedAt    time.Time         `json:"issuedAt"`
-}
-
-func (g *jsonGrant) Grant(statement *Statement) *Grant {
-	return &Grant{
-		Subject:    g.Subject,
-		Permission: g.Permission,
-		Grantee:    g.Grantee,
-		statement:  statement,
-	}
-}
-
-// Statement represents a set of grants made by a verifiable
-// authority. A statement carries an expiration time set by that
-// authority.
-type Statement struct {
-	jsonStatement
-
-	signature *libtrust.JSONSignature
-}
-
-// IsExpired returns whether the statement has expired
-func (s *Statement) IsExpired() bool {
-	return s.Expiration.Before(time.Now().Add(-10 * time.Second))
-}
-
-// Bytes returns an indented JSON representation of the statement
-// as a byte slice. This value can be written to a file or stream
-// without alteration.
-func (s *Statement) Bytes() ([]byte, error) {
-	return s.signature.PrettySignature("signatures")
-}
-
-// LoadStatement loads and verifies a statement from an input stream.
-func LoadStatement(r io.Reader, authority *x509.CertPool) (*Statement, error) {
-	b, err := ioutil.ReadAll(r)
-	if err != nil {
-		return nil, err
-	}
-	js, err := libtrust.ParsePrettySignature(b, "signatures")
-	if err != nil {
-		return nil, err
-	}
-	payload, err := js.Payload()
-	if err != nil {
-		return nil, err
-	}
-	var statement Statement
-	err = json.Unmarshal(payload, &statement.jsonStatement)
-	if err != nil {
-		return nil, err
-	}
-
-	if authority == nil {
-		_, err = js.Verify()
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		_, err = js.VerifyChains(authority)
-		if err != nil {
-			return nil, err
-		}
-	}
-	statement.signature = js
-
-	return &statement, nil
-}
-
-// CreateStatement creates and signs a statement from streams of grants
-// and revocations, each given as a JSON array.
-func CreateStatement(grants, revocations io.Reader, expiration time.Duration, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) {
-	var statement Statement
-	err := json.NewDecoder(grants).Decode(&statement.jsonStatement.Grants)
-	if err != nil {
-		return nil, err
-	}
-	err = json.NewDecoder(revocations).Decode(&statement.jsonStatement.Revocations)
-	if err != nil {
-		return nil, err
-	}
-	statement.jsonStatement.Expiration = time.Now().UTC().Add(expiration)
-	statement.jsonStatement.IssuedAt = time.Now().UTC()
-
-	b, err := json.MarshalIndent(&statement.jsonStatement, "", "   ")
-	if err != nil {
-		return nil, err
-	}
-
-	statement.signature, err = libtrust.NewJSONSignature(b)
-	if err != nil {
-		return nil, err
-	}
-	err = statement.signature.SignWithChain(key, chain)
-	if err != nil {
-		return nil, err
-	}
-
-	return &statement, nil
-}
-
-type statementList []*Statement
-
-func (s statementList) Len() int {
-	return len(s)
-}
-
-func (s statementList) Less(i, j int) bool {
-	return s[i].IssuedAt.Before(s[j].IssuedAt)
-}
-
-func (s statementList) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-// CollapseStatements collapses the given statements into a single list of
-// grants, along with the time at which the earliest statement expires.
-func CollapseStatements(statements []*Statement, useExpired bool) ([]*Grant, time.Time, error) {
-	sorted := make(statementList, 0, len(statements))
-	for _, statement := range statements {
-		if useExpired || !statement.IsExpired() {
-			sorted = append(sorted, statement)
-		}
-	}
-	sort.Sort(sorted)
-
-	var minExpired time.Time
-	var grantCount int
-	roots := map[string]*grantNode{}
-	for i, statement := range sorted {
-		if statement.Expiration.Before(minExpired) || i == 0 {
-			minExpired = statement.Expiration
-		}
-		for _, grant := range statement.Grants {
-			parts := strings.Split(grant.Grantee, "/")
-			nodes := roots
-			g := grant.Grant(statement)
-			grantCount = grantCount + 1
-
-			for _, part := range parts {
-				node, nodeOk := nodes[part]
-				if !nodeOk {
-					node = newGrantNode()
-					nodes[part] = node
-				}
-				node.grants = append(node.grants, g)
-				nodes = node.children
-			}
-		}
-
-		for _, revocation := range statement.Revocations {
-			parts := strings.Split(revocation.Grantee, "/")
-			nodes := roots
-
-			var node *grantNode
-			var nodeOk bool
-			for _, part := range parts {
-				node, nodeOk = nodes[part]
-				if !nodeOk {
-					break
-				}
-				nodes = node.children
-			}
-			if node != nil {
-				for _, grant := range node.grants {
-					if isSubName(grant.Subject, revocation.Subject) {
-						grant.Permission = grant.Permission &^ revocation.Revocation
-					}
-				}
-			}
-		}
-	}
-
-	retGrants := make([]*Grant, 0, grantCount)
-	for _, rootNodes := range roots {
-		retGrants = append(retGrants, rootNodes.grants...)
-	}
-
-	return retGrants, minExpired, nil
-}
-
-// FilterStatements returns the set of statements from which the given grants were created.
-func FilterStatements(grants []*Grant) ([]*Statement, error) {
-	statements := map[*Statement]bool{}
-	for _, grant := range grants {
-		if grant.statement != nil {
-			statements[grant.statement] = true
-		}
-	}
-	retStatements := make([]*Statement, len(statements))
-	var i int
-	for statement := range statements {
-		retStatements[i] = statement
-		i++
-	}
-	return retStatements, nil
-}
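Revocations in CollapseStatements are applied with Go's bit-clear operator (&^), so only the revoked bits are removed from each matching grant. A standalone worked example (not from the deleted code), using the permission values documented in graph.go:

package main

import "fmt"

func main() {
	// CollapseStatements applies a revocation as:
	//   grant.Permission = grant.Permission &^ revocation.Revocation
	// Start with full access (0x0F) and revoke the two subtree bits (0x0C);
	// the per-node read and write bits survive.
	granted := uint16(0x0F)
	revoked := uint16(0x0C)
	remaining := granted &^ revoked
	fmt.Printf("granted %04b, revoked %04b, remaining %04b\n", granted, revoked, remaining)
	// Output: granted 1111, revoked 1100, remaining 0011
}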

Some files were not shown because too many files changed in this diff