
Merge pull request #29734 from tonistiigi/1.13-plugins

[v1.13] plugins updates
Brian Goff, 8 years ago
Parent
Commit
3e0df05ec4
89 files changed, with 2436 insertions and 1604 deletions
  1. api/server/router/plugin/backend.go (+7 -6)
  2. api/server/router/plugin/plugin.go (+2 -2)
  3. api/server/router/plugin/plugin_routes.go (+125 -9)
  4. api/swagger.yaml (+24 -10)
  5. api/types/client.go (+6 -0)
  6. api/types/configs.go (+7 -4)
  7. api/types/plugin.go (+16 -6)
  8. cli/command/container/create.go (+1 -1)
  9. cli/command/image/build.go (+1 -1)
  10. cli/command/image/pull.go (+1 -1)
  11. cli/command/image/trust.go (+18 -5)
  12. cli/command/plugin/create.go (+2 -2)
  13. cli/command/plugin/disable.go (+8 -15)
  14. cli/command/plugin/enable.go (+1 -14)
  15. cli/command/plugin/install.go (+102 -13)
  16. cli/command/plugin/list.go (+2 -2)
  17. cli/command/plugin/push.go (+19 -1)
  18. cli/command/plugin/remove.go (+1 -15)
  19. cli/command/plugin/set.go (+1 -19)
  20. cli/command/system/inspect.go (+5 -1)
  21. cli/trust/trust.go (+12 -1)
  22. client/interface.go (+3 -3)
  23. client/plugin_disable.go (+9 -2)
  24. client/plugin_disable_test.go (+3 -2)
  25. client/plugin_install.go (+46 -23)
  26. client/plugin_push.go (+7 -3)
  27. client/plugin_push_test.go (+2 -2)
  28. cmd/dockerd/daemon.go (+1 -2)
  29. daemon/cluster/executor/backend.go (+2 -0)
  30. daemon/cluster/executor/container/executor.go (+2 -7)
  31. daemon/daemon.go (+23 -45)
  32. daemon/daemon_solaris.go (+0 -10)
  33. daemon/daemon_unix.go (+0 -57)
  34. daemon/daemon_windows.go (+0 -4)
  35. daemon/image_pull.go (+14 -10)
  36. daemon/image_push.go (+17 -12)
  37. daemon/initlayer/setup_solaris.go (+13 -0)
  38. daemon/initlayer/setup_unix.go (+69 -0)
  39. daemon/initlayer/setup_windows.go (+13 -0)
  40. distribution/config.go (+233 -0)
  41. distribution/metadata/v1_id_service.go (+7 -0)
  42. distribution/metadata/v2_metadata_service.go (+19 -0)
  43. distribution/pull.go (+4 -29)
  44. distribution/pull_v1.go (+5 -3)
  45. distribution/pull_v2.go (+88 -70)
  46. distribution/push.go (+3 -36)
  47. distribution/push_v1.go (+19 -13)
  48. distribution/push_v2.go (+37 -34)
  49. distribution/push_v2_test.go (+5 -3)
  50. distribution/registry.go (+30 -5)
  51. distribution/registry_unit_test.go (+6 -3)
  52. distribution/utils/progress.go (+44 -0)
  53. docs/extend/index.md (+15 -146)
  54. docs/reference/commandline/plugin_create.md (+2 -2)
  55. docs/reference/commandline/plugin_disable.md (+4 -2)
  56. docs/reference/commandline/plugin_inspect.md (+1 -2)
  57. docs/reference/commandline/plugin_install.md (+1 -0)
  58. integration-cli/docker_cli_authz_plugin_v2_test.go (+2 -2)
  59. integration-cli/docker_cli_daemon_plugins_test.go (+9 -2)
  60. integration-cli/docker_cli_inspect_test.go (+6 -6)
  61. integration-cli/docker_cli_network_unix_test.go (+1 -1)
  62. integration-cli/docker_cli_plugins_test.go (+94 -15)
  63. integration-cli/docker_utils.go (+2 -2)
  64. integration-cli/trust_server.go (+23 -0)
  65. pkg/progress/progress.go (+11 -0)
  66. plugin/backend_linux.go (+469 -161)
  67. plugin/backend_unsupported.go (+8 -11)
  68. plugin/blobstore.go (+181 -0)
  69. plugin/defs.go (+1 -6)
  70. plugin/distribution/pull.go (+0 -222)
  71. plugin/distribution/push.go (+0 -134)
  72. plugin/distribution/types.go (+0 -12)
  73. plugin/manager.go (+166 -58)
  74. plugin/manager_linux.go (+89 -12)
  75. plugin/store.go (+47 -71)
  76. plugin/store_test.go (+3 -4)
  77. plugin/v2/plugin.go (+14 -183)
  78. plugin/v2/plugin_linux.go (+121 -0)
  79. plugin/v2/plugin_unsupported.go (+14 -0)
  80. vendor.conf (+1 -1)
  81. vendor/github.com/docker/distribution/digest/digest.go (+5 -0)
  82. vendor/github.com/docker/distribution/digest/digester.go (+5 -0)
  83. vendor/github.com/docker/distribution/manifest/schema1/config_builder.go (+7 -2)
  84. vendor/github.com/docker/distribution/manifest/schema2/builder.go (+17 -13)
  85. vendor/github.com/docker/distribution/manifest/schema2/manifest.go (+6 -2)
  86. vendor/github.com/docker/distribution/registry/client/auth/session.go (+3 -1)
  87. volume/drivers/extpoint.go (+23 -17)
  88. volume/drivers/extpoint_test.go (+0 -4)
  89. volume/store/store_test.go (+0 -4)

+ 7 - 6
api/server/router/plugin/backend.go

@@ -5,19 +5,20 @@ import (
 	"net/http"
 
 	enginetypes "github.com/docker/docker/api/types"
+	"github.com/docker/docker/reference"
 	"golang.org/x/net/context"
 )
 
 // Backend for Plugin
 type Backend interface {
-	Disable(name string) error
+	Disable(name string, config *enginetypes.PluginDisableConfig) error
 	Enable(name string, config *enginetypes.PluginEnableConfig) error
 	List() ([]enginetypes.Plugin, error)
-	Inspect(name string) (enginetypes.Plugin, error)
+	Inspect(name string) (*enginetypes.Plugin, error)
 	Remove(name string, config *enginetypes.PluginRmConfig) error
 	Set(name string, args []string) error
-	Privileges(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error)
-	Pull(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges) error
-	Push(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) error
-	CreateFromContext(ctx context.Context, tarCtx io.Reader, options *enginetypes.PluginCreateOptions) error
+	Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error)
+	Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
+	Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error
+	CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error
 }

+ 2 - 2
api/server/router/plugin/plugin.go

@@ -30,8 +30,8 @@ func (r *pluginRouter) initRoutes() {
 		router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin),
 		router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH?
 		router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin),
-		router.NewPostRoute("/plugins/pull", r.pullPlugin),
-		router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin),
+		router.Cancellable(router.NewPostRoute("/plugins/pull", r.pullPlugin)),
+		router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin)),
 		router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin),
 		router.NewPostRoute("/plugins/create", r.createPlugin),
 	}

+ 125 - 9
api/server/router/plugin/plugin_routes.go

@@ -7,8 +7,13 @@ import (
 	"strconv"
 	"strings"
 
+	distreference "github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/reference"
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 )
 
@@ -34,6 +39,48 @@ func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig)
 	return metaHeaders, authConfig
 }
 
+// parseRemoteRef parses the remote reference into a reference.Named
+// returning the tag associated with the reference. In the case the
+// given reference string includes both digest and tag, the returned
+// reference will have the digest without the tag, but the tag will
+// be returned.
+func parseRemoteRef(remote string) (reference.Named, string, error) {
+	// Parse remote reference, supporting remotes with name and tag
+	// NOTE: Using distribution reference to handle references
+	// containing both a name and digest
+	remoteRef, err := distreference.ParseNamed(remote)
+	if err != nil {
+		return nil, "", err
+	}
+
+	var tag string
+	if t, ok := remoteRef.(distreference.Tagged); ok {
+		tag = t.Tag()
+	}
+
+	// Convert distribution reference to docker reference
+	// TODO: remove when docker reference changes reconciled upstream
+	ref, err := reference.WithName(remoteRef.Name())
+	if err != nil {
+		return nil, "", err
+	}
+	if d, ok := remoteRef.(distreference.Digested); ok {
+		ref, err = reference.WithDigest(ref, d.Digest())
+		if err != nil {
+			return nil, "", err
+		}
+	} else if tag != "" {
+		ref, err = reference.WithTag(ref, tag)
+		if err != nil {
+			return nil, "", err
+		}
+	} else {
+		ref = reference.WithDefaultTag(ref)
+	}
+
+	return ref, tag, nil
+}
+
 func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := httputils.ParseForm(r); err != nil {
 		return err
@@ -41,7 +88,12 @@ func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter
 
 	metaHeaders, authConfig := parseHeaders(r.Header)
 
-	privileges, err := pr.backend.Privileges(r.FormValue("name"), metaHeaders, authConfig)
+	ref, _, err := parseRemoteRef(r.FormValue("remote"))
+	if err != nil {
+		return err
+	}
+
+	privileges, err := pr.backend.Privileges(ctx, ref, metaHeaders, authConfig)
 	if err != nil {
 		return err
 	}
@@ -50,20 +102,66 @@ func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter
 
 func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := httputils.ParseForm(r); err != nil {
-		return err
+		return errors.Wrap(err, "failed to parse form")
 	}
 
 	var privileges types.PluginPrivileges
-	if err := json.NewDecoder(r.Body).Decode(&privileges); err != nil {
-		return err
+	dec := json.NewDecoder(r.Body)
+	if err := dec.Decode(&privileges); err != nil {
+		return errors.Wrap(err, "failed to parse privileges")
+	}
+	if dec.More() {
+		return errors.New("invalid privileges")
 	}
 
 	metaHeaders, authConfig := parseHeaders(r.Header)
 
-	if err := pr.backend.Pull(r.FormValue("name"), metaHeaders, authConfig, privileges); err != nil {
+	ref, tag, err := parseRemoteRef(r.FormValue("remote"))
+	if err != nil {
 		return err
 	}
-	w.WriteHeader(http.StatusCreated)
+
+	name := r.FormValue("name")
+	if name == "" {
+		if _, ok := ref.(reference.Canonical); ok {
+			trimmed := reference.TrimNamed(ref)
+			if tag != "" {
+				nt, err := reference.WithTag(trimmed, tag)
+				if err != nil {
+					return err
+				}
+				name = nt.String()
+			} else {
+				name = reference.WithDefaultTag(trimmed).String()
+			}
+		} else {
+			name = ref.String()
+		}
+	} else {
+		localRef, err := reference.ParseNamed(name)
+		if err != nil {
+			return err
+		}
+		if _, ok := localRef.(reference.Canonical); ok {
+			return errors.New("cannot use digest in plugin tag")
+		}
+		if distreference.IsNameOnly(localRef) {
+			// TODO: log change in name to out stream
+			name = reference.WithDefaultTag(localRef).String()
+		}
+	}
+	w.Header().Set("Docker-Plugin-Name", name)
+
+	w.Header().Set("Content-Type", "application/json")
+	output := ioutils.NewWriteFlusher(w)
+
+	if err := pr.backend.Pull(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil {
+		if !output.Flushed() {
+			return err
+		}
+		output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
+	}
+
 	return nil
 }
 
@@ -99,7 +197,16 @@ func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter,
 }
 
 func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	return pr.backend.Disable(vars["name"])
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	name := vars["name"]
+	config := &types.PluginDisableConfig{
+		ForceDisable: httputils.BoolValue(r, "force"),
+	}
+
+	return pr.backend.Disable(name, config)
 }
 
 func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -116,12 +223,21 @@ func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter,
 
 func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := httputils.ParseForm(r); err != nil {
-		return err
+		return errors.Wrap(err, "failed to parse form")
	}
 
 	metaHeaders, authConfig := parseHeaders(r.Header)
 
-	return pr.backend.Push(vars["name"], metaHeaders, authConfig)
+	w.Header().Set("Content-Type", "application/json")
+	output := ioutils.NewWriteFlusher(w)
+
+	if err := pr.backend.Push(ctx, vars["name"], metaHeaders, authConfig, output); err != nil {
+		if !output.Flushed() {
+			return err
+		}
+		output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
+	}
+	return nil
 }
 
 func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
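
For orientation, a minimal sketch of how a remote reference that carries both a tag and a digest is split, in the spirit of parseRemoteRef above. It assumes only the github.com/docker/distribution/reference package vendored in this tree; the repository name and digest are illustrative, not from this PR.

    package main

    import (
    	"fmt"

    	distreference "github.com/docker/distribution/reference"
    )

    func main() {
    	// A remote carrying a name, a tag, and a digest. The router keeps the
    	// digest for the pull and returns the tag separately so the plugin can
    	// still be named name:tag locally.
    	remote := "example.com/acme/sample-plugin:1.0@sha256:" +
    		"5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"

    	ref, err := distreference.ParseNamed(remote)
    	if err != nil {
    		panic(err)
    	}
    	if t, ok := ref.(distreference.Tagged); ok {
    		fmt.Println("tag:", t.Tag()) // tag: 1.0
    	}
    	if d, ok := ref.(distreference.Digested); ok {
    		fmt.Println("digest:", d.Digest()) // digest: sha256:5724...
    	}
    }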

+ 24 - 10
api/swagger.yaml

@@ -1346,16 +1346,13 @@ definitions:
   Plugin:
     description: "A plugin for the Engine API"
     type: "object"
-    required: [Settings, Enabled, Config, Name, Tag]
+    required: [Settings, Enabled, Config, Name]
     properties:
       Id:
         type: "string"
       Name:
         type: "string"
         x-nullable: false
-      Tag:
-        type: "string"
-        x-nullable: false
       Enabled:
         description: "True when the plugin is running. False when the plugin is not running, only installed."
         type: "boolean"
@@ -1391,7 +1388,7 @@ definitions:
           - Documentation
           - Interface
           - Entrypoint
-          - Workdir
+          - WorkDir
           - Network
           - Linux
           - PropagatedMount
@@ -1422,7 +1419,7 @@ definitions:
             type: "array"
             items:
               type: "string"
-          Workdir:
+          WorkDir:
             type: "string"
             x-nullable: false
           User:
@@ -1489,6 +1486,15 @@ definitions:
                 type: "array"
                 items:
                   type: "string"
+          rootfs:
+            type: "object"
+            properties:
+              type:
+                type: "string"
+              diff_ids:
+                type: "array"
+                items:
+                  type: "string"
     example:
       Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"
       Name: "tiborvass/no-remove"
@@ -1527,7 +1533,7 @@ definitions:
         Entrypoint:
           - "plugin-no-remove"
           - "/data"
-        Workdir: ""
+        WorkDir: ""
         User: {}
         Network:
           Type: "host"
@@ -6396,7 +6402,7 @@ paths:
                   Entrypoint:
                     - "plugin-no-remove"
                     - "/data"
-                  Workdir: ""
+                  WorkDir: ""
                   User: {}
                   Network:
                     Type: "host"
@@ -6502,14 +6508,22 @@ paths:
           schema:
             $ref: "#/definitions/ErrorResponse"
       parameters:
-        - name: "name"
+        - name: "remote"
           in: "query"
           description: |
-            The plugin to install.
+            Remote reference for plugin to install.
 
             The `:latest` tag is optional, and is used as the default if omitted.
           required: true
           type: "string"
+        - name: "name"
+          in: "query"
+          description: |
+            Local name for the pulled plugin.
+
+            The `:latest` tag is optional, and is used as the default if omitted.
+          required: false
+          type: "string"
        - name: "X-Registry-Auth"
          in: "header"
          description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)"

+ 6 - 0
api/types/client.go

@@ -340,11 +340,17 @@ type PluginEnableOptions struct {
 	Timeout int
 }
 
+// PluginDisableOptions holds parameters to disable plugins.
+type PluginDisableOptions struct {
+	Force bool
+}
+
 // PluginInstallOptions holds parameters to install a plugin.
 type PluginInstallOptions struct {
 	Disabled              bool
 	AcceptAllPermissions  bool
 	RegistryAuth          string // RegistryAuth is the base64 encoded credentials for the registry
+	RemoteRef             string // RemoteRef is the plugin name on the registry
 	PrivilegeFunc         RequestPrivilegeFunc
 	AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
 	Args                  []string

+ 7 - 4
api/types/configs.go

@@ -53,14 +53,17 @@ type ExecConfig struct {
 	Cmd          []string // Execution commands and args
 }
 
-// PluginRmConfig holds arguments for the plugin remove
-// operation. This struct is used to tell the backend what operations
-// to perform.
+// PluginRmConfig holds arguments for plugin remove.
 type PluginRmConfig struct {
 	ForceRemove bool
 }
 
-// PluginEnableConfig holds arguments for the plugin enable
+// PluginEnableConfig holds arguments for plugin enable
 type PluginEnableConfig struct {
 	Timeout int
 }
+
+// PluginDisableConfig holds arguments for plugin disable.
+type PluginDisableConfig struct {
+	ForceDisable bool
+}

+ 16 - 6
api/types/plugin.go

@@ -25,10 +25,6 @@ type Plugin struct {
 	// settings
 	// Required: true
 	Settings PluginSettings `json:"Settings"`
-
-	// tag
-	// Required: true
-	Tag string `json:"Tag"`
 }
 
 // PluginConfig The config of a plugin.
@@ -78,9 +74,12 @@ type PluginConfig struct {
 	// user
 	User PluginConfigUser `json:"User,omitempty"`
 
-	// workdir
+	// work dir
 	// Required: true
-	Workdir string `json:"Workdir"`
+	WorkDir string `json:"WorkDir"`
+
+	// rootfs
+	Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
 }
 
 // PluginConfigArgs plugin config args
@@ -143,6 +142,17 @@ type PluginConfigNetwork struct {
 	Type string `json:"Type"`
 }
 
+// PluginConfigRootfs plugin config rootfs
+// swagger:model PluginConfigRootfs
+type PluginConfigRootfs struct {
+
+	// diff ids
+	DiffIds []string `json:"diff_ids"`
+
+	// type
+	Type string `json:"type,omitempty"`
+}
+
 // PluginConfigUser plugin config user
 // swagger:model PluginConfigUser
 type PluginConfigUser struct {

+ 1 - 1
cli/command/container/create.go

@@ -170,7 +170,7 @@ func createContainer(ctx context.Context, dockerCli *command.DockerCli, config *
 
 		if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() {
 			var err error
-			trustedRef, err = image.TrustedReference(ctx, dockerCli, ref)
+			trustedRef, err = image.TrustedReference(ctx, dockerCli, ref, nil)
 			if err != nil {
 				return nil, err
 			}

+ 1 - 1
cli/command/image/build.go

@@ -235,7 +235,7 @@ func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
 	var resolvedTags []*resolvedTag
 	if command.IsTrusted() {
 		translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) {
-			return TrustedReference(ctx, dockerCli, ref)
+			return TrustedReference(ctx, dockerCli, ref, nil)
 		}
 		// Wrap the tar archive to replace the Dockerfile entry with the rewritten
 		// Dockerfile which uses trusted pulls.

+ 1 - 1
cli/command/image/pull.go

@@ -74,7 +74,7 @@ func runPull(dockerCli *command.DockerCli, opts pullOptions) error {
 		err = imagePullPrivileged(ctx, dockerCli, authConfig, distributionRef.String(), requestPrivilege, opts.all)
 	}
 	if err != nil {
-		if strings.Contains(err.Error(), "target is a plugin") {
+		if strings.Contains(err.Error(), "target is plugin") {
 			return errors.New(err.Error() + " - Use `docker plugin install`")
 		}
 		return err

+ 18 - 5
cli/command/image/trust.go

@@ -39,6 +39,11 @@ func trustedPush(ctx context.Context, cli *command.DockerCli, repoInfo *registry
 
 	defer responseBody.Close()
 
+	return PushTrustedReference(cli, repoInfo, ref, authConfig, responseBody)
+}
+
+// PushTrustedReference pushes a canonical reference to the trust server.
+func PushTrustedReference(cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, in io.Reader) error {
 	// If it is a trusted push we would like to find the target entry which match the
 	// tag provided in the function and then do an AddTarget later.
 	target := &client.Target{}
@@ -75,14 +80,14 @@ func trustedPush(ctx context.Context, cli *command.DockerCli, repoInfo *registry
 	default:
 		// We want trust signatures to always take an explicit tag,
 		// otherwise it will act as an untrusted push.
-		if err = jsonmessage.DisplayJSONMessagesToStream(responseBody, cli.Out(), nil); err != nil {
+		if err := jsonmessage.DisplayJSONMessagesToStream(in, cli.Out(), nil); err != nil {
 			return err
 		}
 		fmt.Fprintln(cli.Out(), "No tag specified, skipping trust metadata push")
 		return nil
 	}
 
-	if err = jsonmessage.DisplayJSONMessagesToStream(responseBody, cli.Out(), handleTarget); err != nil {
+	if err := jsonmessage.DisplayJSONMessagesToStream(in, cli.Out(), handleTarget); err != nil {
 		return err
 	}
 
@@ -315,8 +320,16 @@ func imagePullPrivileged(ctx context.Context, cli *command.DockerCli, authConfig
 }
 
 // TrustedReference returns the canonical trusted reference for an image reference
-func TrustedReference(ctx context.Context, cli *command.DockerCli, ref reference.NamedTagged) (reference.Canonical, error) {
-	repoInfo, err := registry.ParseRepositoryInfo(ref)
+func TrustedReference(ctx context.Context, cli *command.DockerCli, ref reference.NamedTagged, rs registry.Service) (reference.Canonical, error) {
+	var (
+		repoInfo *registry.RepositoryInfo
+		err      error
+	)
+	if rs != nil {
+		repoInfo, err = rs.ResolveRepository(ref)
+	} else {
+		repoInfo, err = registry.ParseRepositoryInfo(ref)
+	}
 	if err != nil {
 		return nil, err
 	}
@@ -332,7 +345,7 @@ func TrustedReference(ctx context.Context, cli *command.DockerCli, ref reference
 
 	t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole)
 	if err != nil {
-		return nil, err
+		return nil, trust.NotaryError(repoInfo.FullName(), err)
 	}
 	// Only list tags in the top level targets role or the releases delegation role - ignore
 	// all other delegation roles

+ 2 - 2
cli/command/plugin/create.go

@@ -64,8 +64,8 @@ func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command {
 	options := pluginCreateOptions{}
 
 	cmd := &cobra.Command{
-		Use:   "create [OPTIONS] PLUGIN[:tag] PATH-TO-ROOTFS(rootfs + config.json)",
-		Short: "Create a plugin from a rootfs and config",
+		Use:   "create [OPTIONS] PLUGIN PLUGIN-DATA-DIR",
+		Short: "Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.",
 		Args:  cli.RequiresMinArgs(2),
 		RunE: func(cmd *cobra.Command, args []string) error {
 			options.repoName = args[0]

+ 8 - 15
cli/command/plugin/disable.go

@@ -3,39 +3,32 @@ package plugin
 import (
 	"fmt"
 
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
-	"github.com/docker/docker/reference"
 	"github.com/spf13/cobra"
 	"golang.org/x/net/context"
 )
 
 func newDisableCommand(dockerCli *command.DockerCli) *cobra.Command {
+	var force bool
+
 	cmd := &cobra.Command{
 		Use:   "disable PLUGIN",
 		Short: "Disable a plugin",
 		Args:  cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
-			return runDisable(dockerCli, args[0])
+			return runDisable(dockerCli, args[0], force)
 		},
 	}
 
+	flags := cmd.Flags()
+	flags.BoolVarP(&force, "force", "f", false, "Force the disable of an active plugin")
 	return cmd
 }
 
-func runDisable(dockerCli *command.DockerCli, name string) error {
-	named, err := reference.ParseNamed(name) // FIXME: validate
-	if err != nil {
-		return err
-	}
-	if reference.IsNameOnly(named) {
-		named = reference.WithDefaultTag(named)
-	}
-	ref, ok := named.(reference.NamedTagged)
-	if !ok {
-		return fmt.Errorf("invalid name: %s", named.String())
-	}
-	if err := dockerCli.Client().PluginDisable(context.Background(), ref.String()); err != nil {
+func runDisable(dockerCli *command.DockerCli, name string, force bool) error {
+	if err := dockerCli.Client().PluginDisable(context.Background(), name, types.PluginDisableOptions{Force: force}); err != nil {
 		return err
 	}
 	fmt.Fprintln(dockerCli.Out(), name)

+ 1 - 14
cli/command/plugin/enable.go

@@ -6,7 +6,6 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
-	"github.com/docker/docker/reference"
 	"github.com/spf13/cobra"
 	"golang.org/x/net/context"
 )
@@ -36,23 +35,11 @@ func newEnableCommand(dockerCli *command.DockerCli) *cobra.Command {
 
 func runEnable(dockerCli *command.DockerCli, opts *enableOpts) error {
 	name := opts.name
-
-	named, err := reference.ParseNamed(name) // FIXME: validate
-	if err != nil {
-		return err
-	}
-	if reference.IsNameOnly(named) {
-		named = reference.WithDefaultTag(named)
-	}
-	ref, ok := named.(reference.NamedTagged)
-	if !ok {
-		return fmt.Errorf("invalid name: %s", named.String())
-	}
 	if opts.timeout < 0 {
 		return fmt.Errorf("negative timeout %d is invalid", opts.timeout)
 	}
 
-	if err := dockerCli.Client().PluginEnable(context.Background(), ref.String(), types.PluginEnableOptions{Timeout: opts.timeout}); err != nil {
+	if err := dockerCli.Client().PluginEnable(context.Background(), name, types.PluginEnableOptions{Timeout: opts.timeout}); err != nil {
 		return err
 	}
 	fmt.Fprintln(dockerCli.Out(), name)

+ 102 - 13
cli/command/plugin/install.go

@@ -2,12 +2,17 @@ package plugin
 
 import (
 	"bufio"
+	"errors"
 	"fmt"
 	"strings"
 
+	distreference "github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
+	registrytypes "github.com/docker/docker/api/types/registry"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
+	"github.com/docker/docker/cli/command/image"
+	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
 	"github.com/spf13/cobra"
@@ -16,6 +21,7 @@ import (
 
 type pluginOptions struct {
 	name       string
+	alias      string
 	grantPerms bool
 	disable    bool
 	args       []string
@@ -39,41 +45,115 @@ func newInstallCommand(dockerCli *command.DockerCli) *cobra.Command {
 	flags := cmd.Flags()
 	flags.BoolVar(&options.grantPerms, "grant-all-permissions", false, "Grant all permissions necessary to run the plugin")
 	flags.BoolVar(&options.disable, "disable", false, "Do not enable the plugin on install")
+	flags.StringVar(&options.alias, "alias", "", "Local name for plugin")
+
+	command.AddTrustedFlags(flags, true)
 
 	return cmd
 }
 
-func runInstall(dockerCli *command.DockerCli, opts pluginOptions) error {
-	named, err := reference.ParseNamed(opts.name) // FIXME: validate
+func getRepoIndexFromUnnormalizedRef(ref distreference.Named) (*registrytypes.IndexInfo, error) {
+	named, err := reference.ParseNamed(ref.Name())
 	if err != nil {
-		return err
+		return nil, err
 	}
-	if reference.IsNameOnly(named) {
-		named = reference.WithDefaultTag(named)
+
+	repoInfo, err := registry.ParseRepositoryInfo(named)
+	if err != nil {
+		return nil, err
 	}
-	ref, ok := named.(reference.NamedTagged)
-	if !ok {
-		return fmt.Errorf("invalid name: %s", named.String())
+
+	return repoInfo.Index, nil
+}
+
+type pluginRegistryService struct {
+	registry.Service
+}
+
+func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) {
+	repoInfo, err = s.Service.ResolveRepository(name)
+	if repoInfo != nil {
+		repoInfo.Class = "plugin"
+	}
+	return
+}
+
+func newRegistryService() registry.Service {
+	return pluginRegistryService{
+		Service: registry.NewService(registry.ServiceOptions{V2Only: true}),
+	}
+}
+
+func runInstall(dockerCli *command.DockerCli, opts pluginOptions) error {
+	// Parse name using distribution reference package to support name
+	// containing both tag and digest. Names with both tag and digest
+	// will be treated by the daemon as a pull by digest with
+	// an alias for the tag (if no alias is provided).
+	ref, err := distreference.ParseNamed(opts.name)
+	if err != nil {
+		return err
 	}
 
+	alias := ""
+	if opts.alias != "" {
+		aref, err := reference.ParseNamed(opts.alias)
+		if err != nil {
+			return err
+		}
+		aref = reference.WithDefaultTag(aref)
+		if _, ok := aref.(reference.NamedTagged); !ok {
+			return fmt.Errorf("invalid name: %s", opts.alias)
+		}
+		alias = aref.String()
+	}
 	ctx := context.Background()
 
-	repoInfo, err := registry.ParseRepositoryInfo(named)
+	index, err := getRepoIndexFromUnnormalizedRef(ref)
 	if err != nil {
 		return err
 	}
 
-	authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index)
+	remote := ref.String()
+
+	_, isCanonical := ref.(distreference.Canonical)
+	if command.IsTrusted() && !isCanonical {
+		if alias == "" {
+			alias = ref.String()
+		}
+		var nt reference.NamedTagged
+		named, err := reference.ParseNamed(ref.Name())
+		if err != nil {
+			return err
+		}
+		if tagged, ok := ref.(distreference.Tagged); ok {
+			nt, err = reference.WithTag(named, tagged.Tag())
+			if err != nil {
+				return err
+			}
+		} else {
+			named = reference.WithDefaultTag(named)
+			nt = named.(reference.NamedTagged)
+		}
+
+		trusted, err := image.TrustedReference(ctx, dockerCli, nt, newRegistryService())
+		if err != nil {
+			return err
+		}
+		remote = trusted.String()
+	}
+
+	authConfig := command.ResolveAuthConfig(ctx, dockerCli, index)
 
 	encodedAuth, err := command.EncodeAuthToBase64(authConfig)
 	if err != nil {
 		return err
 	}
 
-	registryAuthFunc := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "plugin install")
+	registryAuthFunc := command.RegistryAuthenticationPrivilegedFunc(dockerCli, index, "plugin install")
 
 	options := types.PluginInstallOptions{
 		RegistryAuth:          encodedAuth,
+		RemoteRef:             remote,
 		Disabled:              opts.disable,
 		AcceptAllPermissions:  opts.grantPerms,
 		AcceptPermissionsFunc: acceptPrivileges(dockerCli, opts.name),
@@ -81,10 +161,19 @@ func runInstall(dockerCli *command.DockerCli, opts pluginOptions) error {
 		PrivilegeFunc: registryAuthFunc,
 		Args:          opts.args,
 	}
-	if err := dockerCli.Client().PluginInstall(ctx, ref.String(), options); err != nil {
+
+	responseBody, err := dockerCli.Client().PluginInstall(ctx, alias, options)
+	if err != nil {
+		if strings.Contains(err.Error(), "target is image") {
+			return errors.New(err.Error() + " - Use `docker image pull`")
+		}
+		return err
+	}
+	defer responseBody.Close()
+	if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil {
 		return err
 	}
-	fmt.Fprintln(dockerCli.Out(), opts.name)
+	fmt.Fprintf(dockerCli.Out(), "Installed plugin %s\n", opts.name) // todo: return proper values from the API for this result
 	return nil
 }
 

+ 2 - 2
cli/command/plugin/list.go

@@ -44,7 +44,7 @@ func runList(dockerCli *command.DockerCli, opts listOptions) error {
 	}
 
 	w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0)
-	fmt.Fprintf(w, "ID \tNAME \tTAG \tDESCRIPTION\tENABLED")
+	fmt.Fprintf(w, "ID \tNAME \tDESCRIPTION\tENABLED")
 	fmt.Fprintf(w, "\n")
 
 	for _, p := range plugins {
@@ -56,7 +56,7 @@ func runList(dockerCli *command.DockerCli, opts listOptions) error {
 			desc = stringutils.Ellipsis(desc, 45)
 		}
 
-		fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%v\n", id, p.Name, p.Tag, desc, p.Enabled)
+		fmt.Fprintf(w, "%s\t%s\t%s\t%v\n", id, p.Name, desc, p.Enabled)
 	}
 	w.Flush()
 	return nil

+ 19 - 1
cli/command/plugin/push.go

@@ -7,6 +7,8 @@ import (
 
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
+	"github.com/docker/docker/cli/command/image"
+	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
 	"github.com/spf13/cobra"
@@ -21,6 +23,11 @@ func newPushCommand(dockerCli *command.DockerCli) *cobra.Command {
 			return runPush(dockerCli, args[0])
 		},
 	}
+
+	flags := cmd.Flags()
+
+	command.AddTrustedFlags(flags, true)
+
 	return cmd
 }
 
@@ -49,5 +56,16 @@ func runPush(dockerCli *command.DockerCli, name string) error {
 	if err != nil {
 		return err
 	}
-	return dockerCli.Client().PluginPush(ctx, ref.String(), encodedAuth)
+	responseBody, err := dockerCli.Client().PluginPush(ctx, ref.String(), encodedAuth)
+	if err != nil {
+		return err
+	}
+	defer responseBody.Close()
+
+	if command.IsTrusted() {
+		repoInfo.Class = "plugin"
+		return image.PushTrustedReference(dockerCli, repoInfo, named, authConfig, responseBody)
+	}
+
+	return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil)
 }

+ 1 - 15
cli/command/plugin/remove.go

@@ -6,7 +6,6 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
-	"github.com/docker/docker/reference"
 	"github.com/spf13/cobra"
 	"golang.org/x/net/context"
 )
@@ -41,21 +40,8 @@ func runRemove(dockerCli *command.DockerCli, opts *rmOptions) error {
 
 	var errs cli.Errors
 	for _, name := range opts.plugins {
-		named, err := reference.ParseNamed(name) // FIXME: validate
-		if err != nil {
-			errs = append(errs, err)
-			continue
-		}
-		if reference.IsNameOnly(named) {
-			named = reference.WithDefaultTag(named)
-		}
-		ref, ok := named.(reference.NamedTagged)
-		if !ok {
-			errs = append(errs, fmt.Errorf("invalid name: %s", named.String()))
-			continue
-		}
 		// TODO: pass names to api instead of making multiple api calls
-		if err := dockerCli.Client().PluginRemove(ctx, ref.String(), types.PluginRemoveOptions{Force: opts.force}); err != nil {
+		if err := dockerCli.Client().PluginRemove(ctx, name, types.PluginRemoveOptions{Force: opts.force}); err != nil {
 			errs = append(errs, err)
 			continue
 		}

+ 1 - 19
cli/command/plugin/set.go

@@ -1,13 +1,10 @@
 package plugin
 
 import (
-	"fmt"
-
 	"golang.org/x/net/context"
 
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
-	"github.com/docker/docker/reference"
 	"github.com/spf13/cobra"
 )
 
@@ -17,24 +14,9 @@ func newSetCommand(dockerCli *command.DockerCli) *cobra.Command {
 		Short: "Change settings for a plugin",
 		Args:  cli.RequiresMinArgs(2),
 		RunE: func(cmd *cobra.Command, args []string) error {
-			return runSet(dockerCli, args[0], args[1:])
+			return dockerCli.Client().PluginSet(context.Background(), args[0], args[1:])
 		},
 	}
 
 	return cmd
 }
-
-func runSet(dockerCli *command.DockerCli, name string, args []string) error {
-	named, err := reference.ParseNamed(name) // FIXME: validate
-	if err != nil {
-		return err
-	}
-	if reference.IsNameOnly(named) {
-		named = reference.WithDefaultTag(named)
-	}
-	ref, ok := named.(reference.NamedTagged)
-	if !ok {
-		return fmt.Errorf("invalid name: %s", named.String())
-	}
-	return dockerCli.Client().PluginSet(context.Background(), ref.String(), args)
-}

+ 5 - 1
cli/command/system/inspect.go

@@ -121,6 +121,10 @@ func inspectAll(ctx context.Context, dockerCli *command.DockerCli, getSize bool,
 		return strings.Contains(err.Error(), "This node is not a swarm manager")
 	}
 
+	isErrNotSupported := func(err error) bool {
+		return strings.Contains(err.Error(), "not supported")
+	}
+
 	return func(ref string) (interface{}, []byte, error) {
 		for _, inspectData := range inspectAutodetect {
 			if typeConstraint != "" && inspectData.ObjectType != typeConstraint {
@@ -128,7 +132,7 @@ func inspectAll(ctx context.Context, dockerCli *command.DockerCli, getSize bool,
 			}
 			v, raw, err := inspectData.ObjectInspector(ref)
 			if err != nil {
-				if typeConstraint == "" && (apiclient.IsErrNotFound(err) || isErrNotSwarmManager(err)) {
+				if typeConstraint == "" && (apiclient.IsErrNotFound(err) || isErrNotSwarmManager(err) || isErrNotSupported(err)) {
 					continue
 				}
 				return v, raw, err

+ 12 - 1
cli/trust/trust.go

@@ -147,8 +147,19 @@ func GetNotaryRepository(streams command.Streams, repoInfo *registry.RepositoryI
 		}
 	}
 
+	scope := auth.RepositoryScope{
+		Repository: repoInfo.FullName(),
+		Actions:    actions,
+		Class:      repoInfo.Class,
+	}
 	creds := simpleCredentialStore{auth: authConfig}
-	tokenHandler := auth.NewTokenHandler(authTransport, creds, repoInfo.FullName(), actions...)
+	tokenHandlerOptions := auth.TokenHandlerOptions{
+		Transport:   authTransport,
+		Credentials: creds,
+		Scopes:      []auth.Scope{scope},
+		ClientID:    registry.AuthClientID,
+	}
+	tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
 	basicHandler := auth.NewBasicHandler(creds)
 	modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)))
 	tr := transport.NewTransport(base, modifiers...)

+ 3 - 3
client/interface.go

@@ -110,9 +110,9 @@ type PluginAPIClient interface {
 	PluginList(ctx context.Context) (types.PluginsListResponse, error)
 	PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error
 	PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error
-	PluginDisable(ctx context.Context, name string) error
-	PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error
-	PluginPush(ctx context.Context, name string, registryAuth string) error
+	PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error
+	PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
+	PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error)
 	PluginSet(ctx context.Context, name string, args []string) error
 	PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error)
 	PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error

+ 9 - 2
client/plugin_disable.go

@@ -1,12 +1,19 @@
 package client
 
 import (
+	"net/url"
+
+	"github.com/docker/docker/api/types"
 	"golang.org/x/net/context"
 )
 
 // PluginDisable disables a plugin
-func (cli *Client) PluginDisable(ctx context.Context, name string) error {
-	resp, err := cli.post(ctx, "/plugins/"+name+"/disable", nil, nil, nil)
+func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error {
+	query := url.Values{}
+	if options.Force {
+		query.Set("force", "1")
+	}
+	resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil)
 	ensureReaderClosed(resp)
 	return err
 }
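
For orientation, a minimal usage sketch of the reworked PluginDisable call; the plugin name is illustrative and the client is created from the environment, assuming the client package of this branch:

    package main

    import (
    	"log"

    	"github.com/docker/docker/api/types"
    	"github.com/docker/docker/client"
    	"golang.org/x/net/context"
    )

    func main() {
    	// NewEnvClient picks up DOCKER_HOST and friends from the environment.
    	cli, err := client.NewEnvClient()
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Force disables the plugin even while it is in use, matching the new
    	// `docker plugin disable -f` flag added in this PR.
    	opts := types.PluginDisableOptions{Force: true}
    	if err := cli.PluginDisable(context.Background(), "tiborvass/no-remove:latest", opts); err != nil {
    		log.Fatal(err)
    	}
    }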

+ 3 - 2
client/plugin_disable_test.go

@@ -8,6 +8,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/docker/docker/api/types"
 	"golang.org/x/net/context"
 )
 
@@ -16,7 +17,7 @@ func TestPluginDisableError(t *testing.T) {
 		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
 	}
 
-	err := client.PluginDisable(context.Background(), "plugin_name")
+	err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{})
 	if err == nil || err.Error() != "Error response from daemon: Server error" {
 		t.Fatalf("expected a Server Error, got %v", err)
 	}
@@ -40,7 +41,7 @@ func TestPluginDisable(t *testing.T) {
 		}),
 	}
 
-	err := client.PluginDisable(context.Background(), "plugin_name")
+	err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}

+ 46 - 23
client/plugin_install.go

@@ -2,73 +2,96 @@ package client
 
 import (
 	"encoding/json"
+	"io"
 	"net/http"
 	"net/url"
 
+	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 )
 
 // PluginInstall installs a plugin
-func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (err error) {
-	// FIXME(vdemeester) name is a ref, we might want to parse/validate it here.
+func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
 	query := url.Values{}
-	query.Set("name", name)
+	if _, err := reference.ParseNamed(options.RemoteRef); err != nil {
+		return nil, errors.Wrap(err, "invalid remote reference")
+	}
+	query.Set("remote", options.RemoteRef)
+
 	resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
 	if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+		// todo: do inspect before to check existing name before checking privileges
 		newAuthHeader, privilegeErr := options.PrivilegeFunc()
 		if privilegeErr != nil {
 			ensureReaderClosed(resp)
-			return privilegeErr
+			return nil, privilegeErr
 		}
 		options.RegistryAuth = newAuthHeader
 		resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
 	}
 	if err != nil {
 		ensureReaderClosed(resp)
-		return err
+		return nil, err
 	}
 
 	var privileges types.PluginPrivileges
 	if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil {
 		ensureReaderClosed(resp)
-		return err
+		return nil, err
 	}
 	ensureReaderClosed(resp)
 
 	if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 {
 		accept, err := options.AcceptPermissionsFunc(privileges)
 		if err != nil {
-			return err
+			return nil, err
 		}
 		if !accept {
-			return pluginPermissionDenied{name}
+			return nil, pluginPermissionDenied{options.RemoteRef}
 		}
 	}
 
-	_, err = cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth)
+	// set name for plugin pull, if empty should default to remote reference
+	query.Set("name", name)
+
+	resp, err = cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	defer func() {
+	name = resp.header.Get("Docker-Plugin-Name")
+
+	pr, pw := io.Pipe()
+	go func() { // todo: the client should probably be designed more around the actual api
+		_, err := io.Copy(pw, resp.body)
 		if err != nil {
-			delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
-			ensureReaderClosed(delResp)
+			pw.CloseWithError(err)
+			return
 		}
-	}()
-
-	if len(options.Args) > 0 {
-		if err := cli.PluginSet(ctx, name, options.Args); err != nil {
-			return err
+		defer func() {
+			if err != nil {
+				delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
+				ensureReaderClosed(delResp)
+			}
+		}()
+		if len(options.Args) > 0 {
+			if err := cli.PluginSet(ctx, name, options.Args); err != nil {
+				pw.CloseWithError(err)
+				return
+			}
 		}
-	}
 
-	if options.Disabled {
-		return nil
-	}
+		if options.Disabled {
+			pw.Close()
+			return
+		}
 
-	return cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0})
+		err = cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0})
+		pw.CloseWithError(err)
+	}()
+	return pr, nil
 }
 
 func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
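
Because PluginInstall now returns a progress stream instead of blocking, callers must drain it; a minimal sketch, assuming the same environment-based client as above and an illustrative remote reference:

    package main

    import (
    	"io"
    	"log"
    	"os"

    	"github.com/docker/docker/api/types"
    	"github.com/docker/docker/client"
    	"golang.org/x/net/context"
    )

    func main() {
    	cli, err := client.NewEnvClient()
    	if err != nil {
    		log.Fatal(err)
    	}
    	ctx := context.Background()

    	// RemoteRef is the registry reference introduced by this change; the
    	// name argument is the optional local alias (empty lets the daemon
    	// derive it from the remote reference).
    	opts := types.PluginInstallOptions{
    		RemoteRef:            "tiborvass/no-remove:latest",
    		AcceptAllPermissions: true, // like --grant-all-permissions
    	}
    	rc, err := cli.PluginInstall(ctx, "", opts)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer rc.Close()

    	// The body is a JSON progress stream; draining it lets the client's
    	// install goroutine run through set/enable to completion.
    	if _, err := io.Copy(os.Stdout, rc); err != nil {
    		log.Fatal(err)
    	}
    }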

+ 7 - 3
client/plugin_push.go

@@ -1,13 +1,17 @@
 package client
 
 import (
+	"io"
+
 	"golang.org/x/net/context"
 )
 
 // PluginPush pushes a plugin to a registry
-func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) error {
+func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) {
 	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
 	resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers)
-	ensureReaderClosed(resp)
-	return err
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
 }
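
The matching push call now returns a stream as well; continuing the install sketch above (cli and ctx as there, registryAuth a placeholder for a base64-encoded auth config):

    	rc, err := cli.PluginPush(ctx, "tiborvass/no-remove:latest", registryAuth)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer rc.Close()
    	if _, err := io.Copy(os.Stdout, rc); err != nil {
    		log.Fatal(err)
    	}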

+ 2 - 2
client/plugin_push_test.go

@@ -16,7 +16,7 @@ func TestPluginPushError(t *testing.T) {
 		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
 	}
 
-	err := client.PluginPush(context.Background(), "plugin_name", "")
+	_, err := client.PluginPush(context.Background(), "plugin_name", "")
 	if err == nil || err.Error() != "Error response from daemon: Server error" {
 		t.Fatalf("expected a Server Error, got %v", err)
 	}
@@ -44,7 +44,7 @@ func TestPluginPush(t *testing.T) {
 		}),
 	}
 
-	err := client.PluginPush(context.Background(), "plugin_name", "authtoken")
+	_, err := client.PluginPush(context.Background(), "plugin_name", "authtoken")
 	if err != nil {
 		t.Fatal(err)
 	}

+ 1 - 2
cmd/dockerd/daemon.go

@@ -41,7 +41,6 @@ import (
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/pkg/system"
-	"github.com/docker/docker/plugin"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
@@ -471,7 +470,7 @@ func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) {
 		volume.NewRouter(d),
 		build.NewRouter(dockerfile.NewBuildManager(d)),
 		swarmrouter.NewRouter(c),
-		pluginrouter.NewRouter(plugin.GetManager()),
+		pluginrouter.NewRouter(d.PluginManager()),
 	}
 
 	if d.NetworkControllerEnabled() {

+ 2 - 0
daemon/cluster/executor/backend.go

@@ -13,6 +13,7 @@ import (
 	"github.com/docker/docker/api/types/network"
 	swarmtypes "github.com/docker/docker/api/types/swarm"
 	clustertypes "github.com/docker/docker/daemon/cluster/provider"
+	"github.com/docker/docker/plugin"
 	"github.com/docker/docker/reference"
 	"github.com/docker/libnetwork"
 	"github.com/docker/libnetwork/cluster"
@@ -54,4 +55,5 @@ type Backend interface {
 	WaitForDetachment(context.Context, string, string, string, string) error
 	GetRepository(context.Context, reference.NamedTagged, *types.AuthConfig) (distribution.Repository, bool, error)
 	LookupImage(name string) (*types.ImageInspect, error)
+	PluginManager() *plugin.Manager
 }

+ 2 - 7
daemon/cluster/executor/container/executor.go

@@ -8,7 +8,6 @@ import (
 	"github.com/docker/docker/api/types/network"
 	executorpkg "github.com/docker/docker/daemon/cluster/executor"
 	clustertypes "github.com/docker/docker/daemon/cluster/provider"
-	"github.com/docker/docker/plugin"
 	networktypes "github.com/docker/libnetwork/types"
 	"github.com/docker/swarmkit/agent/exec"
 	"github.com/docker/swarmkit/agent/secrets"
@@ -54,7 +53,7 @@ func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
 	addPlugins("Authorization", info.Plugins.Authorization)
 
 	// add v2 plugins
-	v2Plugins, err := plugin.GetManager().List()
+	v2Plugins, err := e.backend.PluginManager().List()
 	if err == nil {
 		for _, plgn := range v2Plugins {
 			for _, typ := range plgn.Config.Interface.Types {
@@ -67,13 +66,9 @@ func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
 				} else if typ.Capability == "networkdriver" {
 					plgnTyp = "Network"
 				}
-				plgnName := plgn.Name
-				if plgn.Tag != "" {
-					plgnName += ":" + plgn.Tag
-				}
 				plugins[api.PluginDescription{
 					Type: plgnTyp,
-					Name: plgnName,
+					Name: plgn.Name,
 				}] = struct{}{}
 			}
 		}

+ 23 - 45
daemon/daemon.go

@@ -8,7 +8,6 @@ package daemon
 import (
 	"encoding/json"
 	"fmt"
-	"io"
 	"io/ioutil"
 	"net"
 	"os"
@@ -17,7 +16,6 @@ import (
 	"runtime"
 	"strings"
 	"sync"
-	"syscall"
 	"time"
 
 	"github.com/Sirupsen/logrus"
@@ -28,6 +26,7 @@ import (
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/daemon/events"
 	"github.com/docker/docker/daemon/exec"
+	"github.com/docker/docker/daemon/initlayer"
 	"github.com/docker/docker/dockerversion"
 	"github.com/docker/docker/plugin"
 	"github.com/docker/libnetwork/cluster"
@@ -42,14 +41,11 @@ import (
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/plugingetter"
-	"github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/pkg/registrar"
 	"github.com/docker/docker/pkg/signal"
-	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/pkg/sysinfo"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/truncindex"
-	pluginstore "github.com/docker/docker/plugin/store"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
@@ -59,6 +55,7 @@ import (
 	"github.com/docker/libnetwork"
 	nwconfig "github.com/docker/libnetwork/config"
 	"github.com/docker/libtrust"
+	"github.com/pkg/errors"
 )
 
 var (
@@ -99,7 +96,8 @@ type Daemon struct {
 	gidMaps                   []idtools.IDMap
 	layerStore                layer.Store
 	imageStore                image.Store
-	PluginStore               *pluginstore.Store
+	PluginStore               *plugin.Store // todo: remove
+	pluginManager             *plugin.Manager
 	nameIndex                 *registrar.Registrar
 	linkIndex                 *linkIndex
 	containerd                libcontainerd.Client
@@ -549,10 +547,19 @@ func NewDaemon(config *Config, registryService registry.Service, containerdRemot
 	}
 
 	d.RegistryService = registryService
-	d.PluginStore = pluginstore.NewStore(config.Root)
+	d.PluginStore = plugin.NewStore(config.Root) // todo: remove
 	// Plugin system initialization should happen before restore. Do not change order.
-	if err := d.pluginInit(config, containerdRemote); err != nil {
-		return nil, err
+	d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
+		Root:               filepath.Join(config.Root, "plugins"),
+		ExecRoot:           "/run/docker/plugins", // possibly needs fixing
+		Store:              d.PluginStore,
+		Executor:           containerdRemote,
+		RegistryService:    registryService,
+		LiveRestoreEnabled: config.LiveRestoreEnabled,
+		LogPluginEvent:     d.LogPluginEvent, // todo: make private
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "couldn't create plugin manager")
 	}
 
 	d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
@@ -890,36 +897,6 @@ func (daemon *Daemon) V6Subnets() []net.IPNet {
 	return subnets
 }
 
-func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) {
-	progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false)
-	operationCancelled := false
-
-	for prog := range progressChan {
-		if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled {
-			// don't log broken pipe errors as this is the normal case when a client aborts
-			if isBrokenPipe(err) {
-				logrus.Info("Pull session cancelled")
-			} else {
-				logrus.Errorf("error writing progress to client: %v", err)
-			}
-			cancelFunc()
-			operationCancelled = true
-			// Don't return, because we need to continue draining
-			// progressChan until it's closed to avoid a deadlock.
-		}
-	}
-}
-
-func isBrokenPipe(e error) bool {
-	if netErr, ok := e.(*net.OpError); ok {
-		e = netErr.Err
-		if sysErr, ok := netErr.Err.(*os.SyscallError); ok {
-			e = sysErr.Err
-		}
-	}
-	return e == syscall.EPIPE
-}
-
 // GraphDriverName returns the name of the graph driver used by the layer.Store
 func (daemon *Daemon) GraphDriverName() string {
 	return daemon.layerStore.DriverName()
@@ -951,7 +928,7 @@ func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
 
 func (daemon *Daemon) setupInitLayer(initPath string) error {
 	rootUID, rootGID := daemon.GetRemappedUIDGID()
-	return setupInitLayer(initPath, rootUID, rootGID)
+	return initlayer.Setup(initPath, rootUID, rootGID)
 }
 
 func setDefaultMtu(config *Config) {
@@ -1265,12 +1242,8 @@ func (daemon *Daemon) SetCluster(cluster Cluster) {
 	daemon.cluster = cluster
 }
 
-func (daemon *Daemon) pluginInit(cfg *Config, remote libcontainerd.Remote) error {
-	return plugin.Init(cfg.Root, daemon.PluginStore, remote, daemon.RegistryService, cfg.LiveRestoreEnabled, daemon.LogPluginEvent)
-}
-
 func (daemon *Daemon) pluginShutdown() {
-	manager := plugin.GetManager()
+	manager := daemon.pluginManager
 	// Check for a valid manager object. In error conditions, daemon init can fail
 	// and shutdown called, before plugin manager is initialized.
 	if manager != nil {
@@ -1278,6 +1251,11 @@ func (daemon *Daemon) pluginShutdown() {
 	}
 }
 
+// PluginManager returns current pluginManager associated with the daemon
+func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
+	return daemon.pluginManager
+}
+
 // CreateDaemonRoot creates the root for the daemon
 func CreateDaemonRoot(config *Config) error {
 	// get the canonical path to the Docker root directory

+ 0 - 10
daemon/daemon_solaris.go

@@ -96,16 +96,6 @@ func (daemon *Daemon) getLayerInit() func(string) error {
 	return nil
 }
 
-// setupInitLayer populates a directory with mountpoints suitable
-// for bind-mounting dockerinit into the container. The mountpoint is simply an
-// empty file at /.dockerinit
-//
-// This extra layer is used by all containers as the top-most ro layer. It protects
-// the container from unwanted side-effects on the rw layer.
-func setupInitLayer(initLayer string, rootUID, rootGID int) error {
-	return nil
-}
-
 func checkKernel() error {
 	// solaris can rely upon checkSystem() below, we don't skew kernel versions
 	return nil

+ 0 - 57
daemon/daemon_unix.go

@@ -858,63 +858,6 @@ func (daemon *Daemon) getLayerInit() func(string) error {
 	return daemon.setupInitLayer
 }
 
-// setupInitLayer populates a directory with mountpoints suitable
-// for bind-mounting things into the container.
-//
-// This extra layer is used by all containers as the top-most ro layer. It protects
-// the container from unwanted side-effects on the rw layer.
-func setupInitLayer(initLayer string, rootUID, rootGID int) error {
-	for pth, typ := range map[string]string{
-		"/dev/pts":         "dir",
-		"/dev/shm":         "dir",
-		"/proc":            "dir",
-		"/sys":             "dir",
-		"/.dockerenv":      "file",
-		"/etc/resolv.conf": "file",
-		"/etc/hosts":       "file",
-		"/etc/hostname":    "file",
-		"/dev/console":     "file",
-		"/etc/mtab":        "/proc/mounts",
-	} {
-		parts := strings.Split(pth, "/")
-		prev := "/"
-		for _, p := range parts[1:] {
-			prev = filepath.Join(prev, p)
-			syscall.Unlink(filepath.Join(initLayer, prev))
-		}
-
-		if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil {
-			if os.IsNotExist(err) {
-				if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil {
-					return err
-				}
-				switch typ {
-				case "dir":
-					if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil {
-						return err
-					}
-				case "file":
-					f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755)
-					if err != nil {
-						return err
-					}
-					f.Chown(rootUID, rootGID)
-					f.Close()
-				default:
-					if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil {
-						return err
-					}
-				}
-			} else {
-				return err
-			}
-		}
-	}
-
-	// Layer is ready to use, if it wasn't before.
-	return nil
-}
-
 // Parse the remapped root (user namespace) option, which can be one of:
 //   username            - valid username from /etc/passwd
 //   username:groupname  - valid username; valid groupname from /etc/group

+ 0 - 4
daemon/daemon_windows.go

@@ -61,10 +61,6 @@ func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]blkiodev.Thro
 	return nil, nil
 }
 
-func setupInitLayer(initLayer string, rootUID, rootGID int) error {
-	return nil
-}
-
 func (daemon *Daemon) getLayerInit() func(string) error {
 	return nil
 }

+ 14 - 10
daemon/image_pull.go

@@ -9,6 +9,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/distribution"
+	progressutils "github.com/docker/docker/distribution/utils"
 	"github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
@@ -84,20 +85,23 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.
 	ctx, cancelFunc := context.WithCancel(ctx)
 
 	go func() {
-		writeDistributionProgress(cancelFunc, outStream, progressChan)
+		progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
 		close(writesDone)
 	}()
 
 	imagePullConfig := &distribution.ImagePullConfig{
-		MetaHeaders:      metaHeaders,
-		AuthConfig:       authConfig,
-		ProgressOutput:   progress.ChanOutput(progressChan),
-		RegistryService:  daemon.RegistryService,
-		ImageEventLogger: daemon.LogImageEvent,
-		MetadataStore:    daemon.distributionMetadataStore,
-		ImageStore:       daemon.imageStore,
-		ReferenceStore:   daemon.referenceStore,
-		DownloadManager:  daemon.downloadManager,
+		Config: distribution.Config{
+			MetaHeaders:      metaHeaders,
+			AuthConfig:       authConfig,
+			ProgressOutput:   progress.ChanOutput(progressChan),
+			RegistryService:  daemon.RegistryService,
+			ImageEventLogger: daemon.LogImageEvent,
+			MetadataStore:    daemon.distributionMetadataStore,
+			ImageStore:       distribution.NewImageConfigStoreFromStore(daemon.imageStore),
+			ReferenceStore:   daemon.referenceStore,
+		},
+		DownloadManager: daemon.downloadManager,
+		Schema2Types:    distribution.ImageTypes,
 	}
 
 	err := distribution.Pull(ctx, ref, imagePullConfig)

+ 17 - 12
daemon/image_push.go

@@ -3,8 +3,10 @@ package daemon
 import (
 	"io"
 
+	"github.com/docker/distribution/manifest/schema2"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/distribution"
+	progressutils "github.com/docker/docker/distribution/utils"
 	"github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/reference"
 	"golang.org/x/net/context"
@@ -33,22 +35,25 @@ func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHead
 	ctx, cancelFunc := context.WithCancel(ctx)
 
 	go func() {
-		writeDistributionProgress(cancelFunc, outStream, progressChan)
+		progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
 		close(writesDone)
 	}()
 
 	imagePushConfig := &distribution.ImagePushConfig{
-		MetaHeaders:      metaHeaders,
-		AuthConfig:       authConfig,
-		ProgressOutput:   progress.ChanOutput(progressChan),
-		RegistryService:  daemon.RegistryService,
-		ImageEventLogger: daemon.LogImageEvent,
-		MetadataStore:    daemon.distributionMetadataStore,
-		LayerStore:       daemon.layerStore,
-		ImageStore:       daemon.imageStore,
-		ReferenceStore:   daemon.referenceStore,
-		TrustKey:         daemon.trustKey,
-		UploadManager:    daemon.uploadManager,
+		Config: distribution.Config{
+			MetaHeaders:      metaHeaders,
+			AuthConfig:       authConfig,
+			ProgressOutput:   progress.ChanOutput(progressChan),
+			RegistryService:  daemon.RegistryService,
+			ImageEventLogger: daemon.LogImageEvent,
+			MetadataStore:    daemon.distributionMetadataStore,
+			ImageStore:       distribution.NewImageConfigStoreFromStore(daemon.imageStore),
+			ReferenceStore:   daemon.referenceStore,
+		},
+		ConfigMediaType: schema2.MediaTypeImageConfig,
+		LayerStore:      distribution.NewLayerProviderFromStore(daemon.layerStore),
+		TrustKey:        daemon.trustKey,
+		UploadManager:   daemon.uploadManager,
 	}
 
 	err = distribution.Push(ctx, ref, imagePushConfig)

+ 13 - 0
daemon/initlayer/setup_solaris.go

@@ -0,0 +1,13 @@
+// +build solaris,cgo
+
+package initlayer
+
+// Setup populates a directory with mountpoints suitable
+// for bind-mounting dockerinit into the container. The mountpoint is simply an
+// empty file at /.dockerinit
+//
+// This extra layer is used by all containers as the top-most ro layer. It protects
+// the container from unwanted side-effects on the rw layer.
+func Setup(initLayer string, rootUID, rootGID int) error {
+	return nil
+}

+ 69 - 0
daemon/initlayer/setup_unix.go

@@ -0,0 +1,69 @@
+// +build linux freebsd
+
+package initlayer
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+	"syscall"
+
+	"github.com/docker/docker/pkg/idtools"
+)
+
+// Setup populates a directory with mountpoints suitable
+// for bind-mounting things into the container.
+//
+// This extra layer is used by all containers as the top-most ro layer. It protects
+// the container from unwanted side-effects on the rw layer.
+func Setup(initLayer string, rootUID, rootGID int) error {
+	for pth, typ := range map[string]string{
+		"/dev/pts":         "dir",
+		"/dev/shm":         "dir",
+		"/proc":            "dir",
+		"/sys":             "dir",
+		"/.dockerenv":      "file",
+		"/etc/resolv.conf": "file",
+		"/etc/hosts":       "file",
+		"/etc/hostname":    "file",
+		"/dev/console":     "file",
+		"/etc/mtab":        "/proc/mounts",
+	} {
+		parts := strings.Split(pth, "/")
+		prev := "/"
+		for _, p := range parts[1:] {
+			prev = filepath.Join(prev, p)
+			syscall.Unlink(filepath.Join(initLayer, prev))
+		}
+
+		if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil {
+			if os.IsNotExist(err) {
+				if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil {
+					return err
+				}
+				switch typ {
+				case "dir":
+					if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil {
+						return err
+					}
+				case "file":
+					f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755)
+					if err != nil {
+						return err
+					}
+					f.Chown(rootUID, rootGID)
+					f.Close()
+				default:
+					if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil {
+						return err
+					}
+				}
+			} else {
+				return err
+			}
+		}
+	}
+
+	// Layer is ready to use, if it wasn't before.
+	return nil
+}

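daemon.setupInitLayer above now just forwards to this package; a minimal sketch of calling the helper directly (the target directory is a placeholder, and 0/0 assumes no user-namespace remapping):

package main

import (
	"log"

	"github.com/docker/docker/daemon/initlayer"
)

func main() {
	// Populate a scratch init layer with the /proc, /sys, /dev/* and /etc/*
	// mountpoint skeleton, owned by UID 0 / GID 0.
	if err := initlayer.Setup("/tmp/example-init-layer", 0, 0); err != nil {
		log.Fatal(err)
	}
}
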
+ 13 - 0
daemon/initlayer/setup_windows.go

@@ -0,0 +1,13 @@
+// +build windows
+
+package initlayer
+
+// Setup populates a directory with mountpoints suitable
+// for bind-mounting dockerinit into the container. The mountpoint is simply an
+// empty file at /.dockerinit
+//
+// This extra layer is used by all containers as the top-most ro layer. It protects
+// the container from unwanted side-effects on the rw layer.
+func Setup(initLayer string, rootUID, rootGID int) error {
+	return nil
+}

+ 233 - 0
distribution/config.go

@@ -0,0 +1,233 @@
+package distribution
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"runtime"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest/schema2"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/distribution/metadata"
+	"github.com/docker/docker/distribution/xfer"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/progress"
+	"github.com/docker/docker/reference"
+	"github.com/docker/docker/registry"
+	"github.com/docker/libtrust"
+	"golang.org/x/net/context"
+)
+
+// Config stores configuration for communicating
+// with a registry.
+type Config struct {
+	// MetaHeaders stores HTTP headers with metadata about the image
+	MetaHeaders map[string][]string
+	// AuthConfig holds authentication credentials for authenticating with
+	// the registry.
+	AuthConfig *types.AuthConfig
+	// ProgressOutput is the interface for showing the status of the pull
+	// operation.
+	ProgressOutput progress.Output
+	// RegistryService is the registry service to use for TLS configuration
+	// and endpoint lookup.
+	RegistryService registry.Service
+	// ImageEventLogger notifies events for a given image
+	ImageEventLogger func(id, name, action string)
+	// MetadataStore is the storage backend for distribution-specific
+	// metadata.
+	MetadataStore metadata.Store
+	// ImageStore manages images.
+	ImageStore ImageConfigStore
+	// ReferenceStore manages tags. This value is optional, when excluded
+	// content will not be tagged.
+	ReferenceStore reference.Store
+	// RequireSchema2 ensures that only schema2 manifests are used.
+	RequireSchema2 bool
+}
+
+// ImagePullConfig stores pull configuration.
+type ImagePullConfig struct {
+	Config
+
+	// DownloadManager manages concurrent pulls.
+	DownloadManager RootFSDownloadManager
+	// Schema2Types is the valid schema2 configuration types allowed
+	// by the pull operation.
+	Schema2Types []string
+}
+
+// ImagePushConfig stores push configuration.
+type ImagePushConfig struct {
+	Config
+
+	// ConfigMediaType is the configuration media type for
+	// schema2 manifests.
+	ConfigMediaType string
+	// LayerStore manages layers.
+	LayerStore PushLayerProvider
+	// TrustKey is the private key for legacy signatures. This is typically
+	// an ephemeral key, since these signatures are no longer verified.
+	TrustKey libtrust.PrivateKey
+	// UploadManager dispatches uploads.
+	UploadManager *xfer.LayerUploadManager
+}
+
+// ImageConfigStore handles storing and getting image configurations
+// by digest. Allows getting an image configuration's rootfs from the
+// configuration.
+type ImageConfigStore interface {
+	Put([]byte) (digest.Digest, error)
+	Get(digest.Digest) ([]byte, error)
+	RootFSFromConfig([]byte) (*image.RootFS, error)
+}
+
+// PushLayerProvider provides layers to be pushed by ChainID.
+type PushLayerProvider interface {
+	Get(layer.ChainID) (PushLayer, error)
+}
+
+// PushLayer is a pushable layer with metadata about the layer
+// and access to the content of the layer.
+type PushLayer interface {
+	ChainID() layer.ChainID
+	DiffID() layer.DiffID
+	Parent() PushLayer
+	Open() (io.ReadCloser, error)
+	Size() (int64, error)
+	MediaType() string
+	Release()
+}
+
+// RootFSDownloadManager handles downloading of the rootfs
+type RootFSDownloadManager interface {
+	// Download downloads the layers into the given initial rootfs and
+	// returns the final rootfs.
+	// Given progress output to track download progress
+	// Returns function to release download resources
+	Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error)
+}
+
+type imageConfigStore struct {
+	image.Store
+}
+
+// NewImageConfigStoreFromStore returns an ImageConfigStore backed
+// by an image.Store for container images.
+func NewImageConfigStoreFromStore(is image.Store) ImageConfigStore {
+	return &imageConfigStore{
+		Store: is,
+	}
+}
+
+func (s *imageConfigStore) Put(c []byte) (digest.Digest, error) {
+	id, err := s.Store.Create(c)
+	return digest.Digest(id), err
+}
+
+func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) {
+	img, err := s.Store.Get(image.IDFromDigest(d))
+	if err != nil {
+		return nil, err
+	}
+	return img.RawJSON(), nil
+}
+
+func (s *imageConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) {
+	var unmarshalledConfig image.Image
+	if err := json.Unmarshal(c, &unmarshalledConfig); err != nil {
+		return nil, err
+	}
+
+	// fail immediately on windows
+	if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" {
+		return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
+	}
+
+	return unmarshalledConfig.RootFS, nil
+}
+
+type storeLayerProvider struct {
+	ls layer.Store
+}
+
+// NewLayerProviderFromStore returns a layer provider backed by
+// an instance of LayerStore. Only getting layers as gzipped
+// tars is supported.
+func NewLayerProviderFromStore(ls layer.Store) PushLayerProvider {
+	return &storeLayerProvider{
+		ls: ls,
+	}
+}
+
+func (p *storeLayerProvider) Get(lid layer.ChainID) (PushLayer, error) {
+	if lid == "" {
+		return &storeLayer{
+			Layer: layer.EmptyLayer,
+		}, nil
+	}
+	l, err := p.ls.Get(lid)
+	if err != nil {
+		return nil, err
+	}
+
+	sl := storeLayer{
+		Layer: l,
+		ls:    p.ls,
+	}
+	if d, ok := l.(distribution.Describable); ok {
+		return &describableStoreLayer{
+			storeLayer:  sl,
+			describable: d,
+		}, nil
+	}
+
+	return &sl, nil
+}
+
+type storeLayer struct {
+	layer.Layer
+	ls layer.Store
+}
+
+func (l *storeLayer) Parent() PushLayer {
+	p := l.Layer.Parent()
+	if p == nil {
+		return nil
+	}
+	return &storeLayer{
+		Layer: p,
+		ls:    l.ls,
+	}
+}
+
+func (l *storeLayer) Open() (io.ReadCloser, error) {
+	return l.Layer.TarStream()
+}
+
+func (l *storeLayer) Size() (int64, error) {
+	return l.Layer.DiffSize()
+}
+
+func (l *storeLayer) MediaType() string {
+	// layer store always returns uncompressed tars
+	return schema2.MediaTypeUncompressedLayer
+}
+
+func (l *storeLayer) Release() {
+	if l.ls != nil {
+		layer.ReleaseAndLog(l.ls, l.Layer)
+	}
+}
+
+type describableStoreLayer struct {
+	storeLayer
+	describable distribution.Describable
+}
+
+func (l *describableStoreLayer) Descriptor() distribution.Descriptor {
+	return l.describable.Descriptor()
+}

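The point of extracting ImageConfigStore (and PushLayerProvider) as interfaces is that backends other than image.Store can drive the same pull and push code. The sketch below is illustrative only, not part of this patch: a map-backed store satisfying the interface, written as if it lived next to config.go so the json, fmt, digest and image imports above apply.

// memoryConfigStore is a hypothetical map-backed ImageConfigStore; it is not
// safe for concurrent use and exists only to illustrate the interface.
type memoryConfigStore struct {
	configs map[digest.Digest][]byte
}

func (s *memoryConfigStore) Put(c []byte) (digest.Digest, error) {
	if s.configs == nil {
		s.configs = map[digest.Digest][]byte{}
	}
	dgst := digest.FromBytes(c)
	s.configs[dgst] = c
	return dgst, nil
}

func (s *memoryConfigStore) Get(d digest.Digest) ([]byte, error) {
	c, ok := s.configs[d]
	if !ok {
		return nil, fmt.Errorf("config %s not found", d)
	}
	return c, nil
}

func (s *memoryConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) {
	// An image-style config carries a rootfs section; decode it the same way
	// imageConfigStore does above. Other config types may return an error or
	// a synthetic rootfs here.
	var img image.Image
	if err := json.Unmarshal(c, &img); err != nil {
		return nil, err
	}
	return img.RootFS, nil
}
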
+ 7 - 0
distribution/metadata/v1_id_service.go

@@ -3,6 +3,7 @@ package metadata
 import (
 	"github.com/docker/docker/image/v1"
 	"github.com/docker/docker/layer"
+	"github.com/pkg/errors"
 )
 
 // V1IDService maps v1 IDs to layers on disk.
@@ -24,6 +25,9 @@ func (idserv *V1IDService) namespace() string {
 
 // Get finds a layer by its V1 ID.
 func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) {
+	if idserv.store == nil {
+		return "", errors.New("no v1IDService storage")
+	}
 	if err := v1.ValidateID(v1ID); err != nil {
 		return layer.DiffID(""), err
 	}
@@ -37,6 +41,9 @@ func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) {
 
 // Set associates an image with a V1 ID.
 func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error {
+	if idserv.store == nil {
+		return nil
+	}
 	if err := v1.ValidateID(v1ID); err != nil {
 		return err
 	}

+ 19 - 0
distribution/metadata/v2_metadata_service.go

@@ -5,6 +5,7 @@ import (
 	"crypto/sha256"
 	"encoding/hex"
 	"encoding/json"
+	"errors"
 
 	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/api/types"
@@ -125,6 +126,9 @@ func (serv *v2MetadataService) digestKey(dgst digest.Digest) string {
 
 // GetMetadata finds the metadata associated with a layer DiffID.
 func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) {
+	if serv.store == nil {
+		return nil, errors.New("no metadata storage")
+	}
 	jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID))
 	if err != nil {
 		return nil, err
@@ -140,6 +144,9 @@ func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, e
 
 // GetDiffID finds a layer DiffID from a digest.
 func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) {
+	if serv.store == nil {
+		return layer.DiffID(""), errors.New("no metadata storage")
+	}
 	diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst))
 	if err != nil {
 		return layer.DiffID(""), err
@@ -151,6 +158,12 @@ func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, erro
 // Add associates metadata with a layer DiffID. If too many metadata entries are
 // present, the oldest one is dropped.
 func (serv *v2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error {
+	if serv.store == nil {
+		// Support a service which has no backend storage, in this case
+		// an add becomes a no-op.
+		// TODO: implement in memory storage
+		return nil
+	}
 	oldMetadata, err := serv.GetMetadata(diffID)
 	if err != nil {
 		oldMetadata = nil
@@ -192,6 +205,12 @@ func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, me
 
 // Remove unassociates a metadata entry from a layer DiffID.
 func (serv *v2MetadataService) Remove(metadata V2Metadata) error {
+	if serv.store == nil {
+		// Support a service which has no backend storage, in this case
+		// a remove becomes a no-op.
+		// TODO: implement in memory storage
+		return nil
+	}
 	diffID, err := serv.GetDiffID(metadata.Digest)
 	if err != nil {
 		return err

+ 4 - 29
distribution/pull.go

@@ -6,42 +6,13 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/api"
-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/distribution/metadata"
-	"github.com/docker/docker/distribution/xfer"
-	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
 	"golang.org/x/net/context"
 )
 
-// ImagePullConfig stores pull configuration.
-type ImagePullConfig struct {
-	// MetaHeaders stores HTTP headers with metadata about the image
-	MetaHeaders map[string][]string
-	// AuthConfig holds authentication credentials for authenticating with
-	// the registry.
-	AuthConfig *types.AuthConfig
-	// ProgressOutput is the interface for showing the status of the pull
-	// operation.
-	ProgressOutput progress.Output
-	// RegistryService is the registry service to use for TLS configuration
-	// and endpoint lookup.
-	RegistryService registry.Service
-	// ImageEventLogger notifies events for a given image
-	ImageEventLogger func(id, name, action string)
-	// MetadataStore is the storage backend for distribution-specific
-	// metadata.
-	MetadataStore metadata.Store
-	// ImageStore manages images.
-	ImageStore image.Store
-	// ReferenceStore manages tags.
-	ReferenceStore reference.Store
-	// DownloadManager manages concurrent pulls.
-	DownloadManager *xfer.LayerDownloadManager
-}
-
 // Puller is an interface that abstracts pulling for different API versions.
 type Puller interface {
 	// Pull tries to pull the image referenced by `tag`
@@ -117,6 +88,10 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo
 		confirmedTLSRegistries = make(map[string]struct{})
 	)
 	for _, endpoint := range endpoints {
+		if imagePullConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 {
+			continue
+		}
+
 		if confirmedV2 && endpoint.Version == registry.APIVersion1 {
 			logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
 			continue

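Combined with the nil-store guards above, this endpoint filter lets Pull run outside the normal image path. An illustrative sketch, not code from this patch, of a schema2-only, config-only pull configuration, written as if it sat next to config.go above so its imports (types, registry, progress, schema2) are available; imageStore, rs and out come from the caller:

func newConfigOnlyPullConfig(imageStore ImageConfigStore, rs registry.Service, out progress.Output) *ImagePullConfig {
	return &ImagePullConfig{
		Config: Config{
			AuthConfig:       &types.AuthConfig{},              // placeholder credentials
			ProgressOutput:   out,
			RegistryService:  rs,
			ImageEventLogger: func(id, name, action string) {}, // no-op event sink
			ImageStore:       imageStore,
			// MetadataStore and ReferenceStore stay nil; the nil-store guards
			// added in this patch make both optional.
			RequireSchema2: true,
		},
		// A nil DownloadManager means pullSchema2 skips the layer download and
		// stores only the config blob.
		Schema2Types: []string{schema2.MediaTypePluginConfig}, // plugin configs only
	}
}
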
+ 5 - 3
distribution/pull_v1.go

@@ -243,13 +243,15 @@ func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNa
 		return err
 	}
 
-	imageID, err := p.config.ImageStore.Create(config)
+	imageID, err := p.config.ImageStore.Put(config)
 	if err != nil {
 		return err
 	}
 
-	if err := p.config.ReferenceStore.AddTag(localNameRef, imageID.Digest(), true); err != nil {
-		return err
+	if p.config.ReferenceStore != nil {
+		if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil {
+			return err
+		}
 	}
 
 	return nil

+ 88 - 70
distribution/pull_v2.go

@@ -33,9 +33,8 @@ import (
 )
 
 var (
-	errRootFSMismatch  = errors.New("layers from manifest don't match image configuration")
-	errMediaTypePlugin = errors.New("target is a plugin")
-	errRootFSInvalid   = errors.New("invalid rootfs in image configuration")
+	errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
+	errRootFSInvalid  = errors.New("invalid rootfs in image configuration")
 )
 
 // ImageConfigPullError is an error pulling the image config blob
@@ -355,8 +354,19 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat
 	}
 
 	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
-		if m.Manifest.Config.MediaType == schema2.MediaTypePluginConfig {
-			return false, errMediaTypePlugin
+		var allowedMediatype bool
+		for _, t := range p.config.Schema2Types {
+			if m.Manifest.Config.MediaType == t {
+				allowedMediatype = true
+				break
+			}
+		}
+		if !allowedMediatype {
+			configClass := mediaTypeClasses[m.Manifest.Config.MediaType]
+			if configClass == "" {
+				configClass = "unknown"
+			}
+			return false, fmt.Errorf("target is %s", configClass)
 		}
 	}
 
@@ -374,6 +384,9 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat
 
 	switch v := manifest.(type) {
 	case *schema1.SignedManifest:
+		if p.config.RequireSchema2 {
+			return false, fmt.Errorf("invalid manifest: not schema2")
+		}
 		id, manifestDigest, err = p.pullSchema1(ctx, ref, v)
 		if err != nil {
 			return false, err
@@ -394,25 +407,27 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat
 
 	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
 
-	oldTagID, err := p.config.ReferenceStore.Get(ref)
-	if err == nil {
-		if oldTagID == id {
-			return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
-		}
-	} else if err != reference.ErrDoesNotExist {
-		return false, err
-	}
-
-	if canonical, ok := ref.(reference.Canonical); ok {
-		if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
-			return false, err
-		}
-	} else {
-		if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
+	if p.config.ReferenceStore != nil {
+		oldTagID, err := p.config.ReferenceStore.Get(ref)
+		if err == nil {
+			if oldTagID == id {
+				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
+			}
+		} else if err != reference.ErrDoesNotExist {
 			return false, err
 		}
-		if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
-			return false, err
+
+		if canonical, ok := ref.(reference.Canonical); ok {
+			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
+				return false, err
+			}
+		} else {
+			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
+				return false, err
+			}
+			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
+				return false, err
+			}
 		}
 	}
 	return true, nil
@@ -481,14 +496,14 @@ func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverif
 		return "", "", err
 	}
 
-	imageID, err := p.config.ImageStore.Create(config)
+	imageID, err := p.config.ImageStore.Put(config)
 	if err != nil {
 		return "", "", err
 	}
 
 	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)
 
-	return imageID.Digest(), manifestDigest, nil
+	return imageID, manifestDigest, nil
 }
 
 func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
@@ -498,7 +513,7 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s
 	}
 
 	target := mfst.Target()
-	if _, err := p.config.ImageStore.Get(image.IDFromDigest(target.Digest)); err == nil {
+	if _, err := p.config.ImageStore.Get(target.Digest); err == nil {
 		// If the image already exists locally, no need to pull
 		// anything.
 		return target.Digest, manifestDigest, nil
@@ -537,9 +552,9 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s
 	}()
 
 	var (
-		configJSON         []byte       // raw serialized image config
-		unmarshalledConfig image.Image  // deserialized image config
-		downloadRootFS     image.RootFS // rootFS to use for registering layers.
+		configJSON       []byte        // raw serialized image config
+		downloadedRootFS *image.RootFS // rootFS from registered layers
+		configRootFS     *image.RootFS // rootFS from configuration
 	)
 
 	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
@@ -551,84 +566,87 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s
 	// check to block Windows images being pulled on Linux is implemented, it
 	// may be necessary to perform the same type of serialisation.
 	if runtime.GOOS == "windows" {
-		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
+		configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, errChan)
 		if err != nil {
 			return "", "", err
 		}
 
-		if unmarshalledConfig.RootFS == nil {
+		if configRootFS == nil {
 			return "", "", errRootFSInvalid
 		}
-
-		if unmarshalledConfig.OS == "linux" {
-			return "", "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
-		}
 	}
 
-	downloadRootFS = *image.NewRootFS()
-
-	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
-	if err != nil {
-		if configJSON != nil {
-			// Already received the config
-			return "", "", err
-		}
-		select {
-		case err = <-errChan:
-			return "", "", err
-		default:
-			cancel()
+	if p.config.DownloadManager != nil {
+		downloadRootFS := *image.NewRootFS()
+		rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
+		if err != nil {
+			if configJSON != nil {
+				// Already received the config
+				return "", "", err
+			}
 			select {
-			case <-configChan:
-			case <-errChan:
+			case err = <-errChan:
+				return "", "", err
+			default:
+				cancel()
+				select {
+				case <-configChan:
+				case <-errChan:
+				}
+				return "", "", err
 			}
-			return "", "", err
 		}
+		if release != nil {
+			defer release()
+		}
+
+		downloadedRootFS = &rootFS
 	}
-	defer release()
 
 	if configJSON == nil {
-		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
+		configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, errChan)
 		if err != nil {
 			return "", "", err
 		}
 
-		if unmarshalledConfig.RootFS == nil {
+		if configRootFS == nil {
 			return "", "", errRootFSInvalid
 		}
 	}
 
-	// The DiffIDs returned in rootFS MUST match those in the config.
-	// Otherwise the image config could be referencing layers that aren't
-	// included in the manifest.
-	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
-		return "", "", errRootFSMismatch
-	}
-
-	for i := range rootFS.DiffIDs {
-		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
+	if downloadedRootFS != nil {
+		// The DiffIDs returned in rootFS MUST match those in the config.
+		// Otherwise the image config could be referencing layers that aren't
+		// included in the manifest.
+		if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
 			return "", "", errRootFSMismatch
 		}
+
+		for i := range downloadedRootFS.DiffIDs {
+			if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
+				return "", "", errRootFSMismatch
+			}
+		}
 	}
 
-	imageID, err := p.config.ImageStore.Create(configJSON)
+	imageID, err := p.config.ImageStore.Put(configJSON)
 	if err != nil {
 		return "", "", err
 	}
 
-	return imageID.Digest(), manifestDigest, nil
+	return imageID, manifestDigest, nil
 }
 
-func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
+func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, error) {
 	select {
 	case configJSON := <-configChan:
-		var unmarshalledConfig image.Image
-		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
-			return nil, image.Image{}, err
+		rootfs, err := s.RootFSFromConfig(configJSON)
+		if err != nil {
+			return nil, nil, err
 		}
-		return configJSON, unmarshalledConfig, nil
+		return configJSON, rootfs, nil
 	case err := <-errChan:
-		return nil, image.Image{}, err
+		return nil, nil, err
 		// Don't need a case for ctx.Done in the select because cancellation
 		// will trigger an error in p.pullSchema2ImageConfig.
 	}

+ 3 - 36
distribution/push.go

@@ -7,49 +7,13 @@ import (
 	"io"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/distribution/metadata"
-	"github.com/docker/docker/distribution/xfer"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
-	"github.com/docker/libtrust"
 	"golang.org/x/net/context"
 )
 
-// ImagePushConfig stores push configuration.
-type ImagePushConfig struct {
-	// MetaHeaders store HTTP headers with metadata about the image
-	MetaHeaders map[string][]string
-	// AuthConfig holds authentication credentials for authenticating with
-	// the registry.
-	AuthConfig *types.AuthConfig
-	// ProgressOutput is the interface for showing the status of the push
-	// operation.
-	ProgressOutput progress.Output
-	// RegistryService is the registry service to use for TLS configuration
-	// and endpoint lookup.
-	RegistryService registry.Service
-	// ImageEventLogger notifies events for a given image
-	ImageEventLogger func(id, name, action string)
-	// MetadataStore is the storage backend for distribution-specific
-	// metadata.
-	MetadataStore metadata.Store
-	// LayerStore manages layers.
-	LayerStore layer.Store
-	// ImageStore manages images.
-	ImageStore image.Store
-	// ReferenceStore manages tags.
-	ReferenceStore reference.Store
-	// TrustKey is the private key for legacy signatures. This is typically
-	// an ephemeral key, since these signatures are no longer verified.
-	TrustKey libtrust.PrivateKey
-	// UploadManager dispatches uploads.
-	UploadManager *xfer.LayerUploadManager
-}
-
 // Pusher is an interface that abstracts pushing for different API versions.
 type Pusher interface {
 	// Push tries to push the image configured at the creation of Pusher.
@@ -127,6 +91,9 @@ func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushCo
 	)
 
 	for _, endpoint := range endpoints {
+		if imagePushConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 {
+			continue
+		}
 		if confirmedV2 && endpoint.Version == registry.APIVersion1 {
 			logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
 			continue

+ 19 - 13
distribution/push_v1.go

@@ -137,7 +137,7 @@ func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1Dependen
 }
 
 // Retrieve all the images to be uploaded in the correct order
-func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []layer.Layer, err error) {
+func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []PushLayer, err error) {
 	tagsByImage = make(map[image.ID][]string)
 
 	// Ignore digest references
@@ -202,24 +202,30 @@ func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID
 	return
 }
 
-func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]layer.Layer) (imageListForThisTag []v1Image, err error) {
-	img, err := p.config.ImageStore.Get(imgID)
+func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]PushLayer) (imageListForThisTag []v1Image, err error) {
+	ics, ok := p.config.ImageStore.(*imageConfigStore)
+	if !ok {
+		return nil, fmt.Errorf("only image store images supported for v1 push")
+	}
+	img, err := ics.Store.Get(imgID)
 	if err != nil {
 		return nil, err
 	}
 
 	topLayerID := img.RootFS.ChainID()
 
-	var l layer.Layer
-	if topLayerID == "" {
-		l = layer.EmptyLayer
-	} else {
-		l, err = p.config.LayerStore.Get(topLayerID)
-		*referencedLayers = append(*referencedLayers, l)
-		if err != nil {
-			return nil, fmt.Errorf("failed to get top layer from image: %v", err)
-		}
+	pl, err := p.config.LayerStore.Get(topLayerID)
+	*referencedLayers = append(*referencedLayers, pl)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get top layer from image: %v", err)
+	}
+
+	// V1 push is deprecated, only support existing layerstore layers
+	lsl, ok := pl.(*storeLayer)
+	if !ok {
+		return nil, fmt.Errorf("only layer store layers supported for v1 push")
 	}
+	l := lsl.Layer
 
 	dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen)
 	if err != nil {
@@ -371,7 +377,7 @@ func (p *v1Pusher) pushRepository(ctx context.Context) error {
 	imgList, tags, referencedLayers, err := p.getImageList()
 	defer func() {
 		for _, l := range referencedLayers {
-			p.config.LayerStore.Release(l)
+			l.Release()
 		}
 	}()
 	if err != nil {

+ 37 - 34
distribution/push_v2.go

@@ -20,7 +20,6 @@ import (
 	"github.com/docker/distribution/registry/client"
 	"github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
-	"github.com/docker/docker/image"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/progress"
@@ -123,23 +122,21 @@ func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
 func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error {
 	logrus.Debugf("Pushing repository: %s", ref.String())
 
-	img, err := p.config.ImageStore.Get(image.IDFromDigest(id))
+	imgConfig, err := p.config.ImageStore.Get(id)
 	if err != nil {
 		return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err)
 	}
 
-	var l layer.Layer
+	rootfs, err := p.config.ImageStore.RootFSFromConfig(imgConfig)
+	if err != nil {
+		return fmt.Errorf("unable to get rootfs for image %s: %s", ref.String(), err)
+	}
 
-	topLayerID := img.RootFS.ChainID()
-	if topLayerID == "" {
-		l = layer.EmptyLayer
-	} else {
-		l, err = p.config.LayerStore.Get(topLayerID)
-		if err != nil {
-			return fmt.Errorf("failed to get top layer from image: %v", err)
-		}
-		defer layer.ReleaseAndLog(p.config.LayerStore, l)
+	l, err := p.config.LayerStore.Get(rootfs.ChainID())
+	if err != nil {
+		return fmt.Errorf("failed to get top layer from image: %v", err)
 	}
 	}
+	defer l.Release()
 
 
 	hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig)
 	hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig)
 	if err != nil {
 	if err != nil {
@@ -158,7 +155,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id
 	}
 	}
 
 
 	// Loop bounds condition is to avoid pushing the base layer on Windows.
 	// Loop bounds condition is to avoid pushing the base layer on Windows.
-	for i := 0; i < len(img.RootFS.DiffIDs); i++ {
+	for i := 0; i < len(rootfs.DiffIDs); i++ {
 		descriptor := descriptorTemplate
 		descriptor := descriptorTemplate
 		descriptor.layer = l
 		descriptor.layer = l
 		descriptor.checkedDigests = make(map[digest.Digest]struct{})
 		descriptor.checkedDigests = make(map[digest.Digest]struct{})
@@ -172,7 +169,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id
 	}
 	}
 
 
 	// Try schema2 first
 	// Try schema2 first
-	builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), img.RawJSON())
+	builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig)
 	manifest, err := manifestFromBuilder(ctx, builder, descriptors)
 	manifest, err := manifestFromBuilder(ctx, builder, descriptors)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
@@ -185,7 +182,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id
 
 
 	putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())}
 	putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())}
 	if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
 	if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
-		if runtime.GOOS == "windows" {
+		if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 {
 			logrus.Warnf("failed to upload schema2 manifest: %v", err)
 			logrus.Warnf("failed to upload schema2 manifest: %v", err)
 			return err
 			return err
 		}
 		}
@@ -196,7 +193,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
-		builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, img.RawJSON())
+		builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig)
 		manifest, err = manifestFromBuilder(ctx, builder, descriptors)
 		manifest, err = manifestFromBuilder(ctx, builder, descriptors)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
@@ -246,7 +243,7 @@ func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuild
 }
 }
 
 
 type v2PushDescriptor struct {
 type v2PushDescriptor struct {
-	layer             layer.Layer
+	layer             PushLayer
 	v2MetadataService metadata.V2MetadataService
 	v2MetadataService metadata.V2MetadataService
 	hmacKey           []byte
 	hmacKey           []byte
 	repoInfo          reference.Named
 	repoInfo          reference.Named
@@ -425,26 +422,32 @@ func (pd *v2PushDescriptor) uploadUsingSession(
 	diffID layer.DiffID,
 	diffID layer.DiffID,
 	layerUpload distribution.BlobWriter,
 	layerUpload distribution.BlobWriter,
 ) (distribution.Descriptor, error) {
 ) (distribution.Descriptor, error) {
-	arch, err := pd.layer.TarStream()
-	if err != nil {
-		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
-	}
-
-	// don't care if this fails; best effort
-	size, _ := pd.layer.DiffSize()
-
-	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing")
-	compressedReader, compressionDone := compress(reader)
-	defer func() {
+	var reader io.ReadCloser
+
+	contentReader, err := pd.layer.Open()
+	size, _ := pd.layer.Size()
+
+	reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, size, pd.ID(), "Pushing")
+
+	switch m := pd.layer.MediaType(); m {
+	case schema2.MediaTypeUncompressedLayer:
+		compressedReader, compressionDone := compress(reader)
+		defer func(closer io.Closer) {
+			closer.Close()
+			<-compressionDone
+		}(reader)
+		reader = compressedReader
+	case schema2.MediaTypeLayer:
+	default:
 		reader.Close()
 		reader.Close()
-		<-compressionDone
-	}()
+		return distribution.Descriptor{}, fmt.Errorf("unsupported layer media type %s", m)
+	}
 
 
 	digester := digest.Canonical.New()
 	digester := digest.Canonical.New()
-	tee := io.TeeReader(compressedReader, digester.Hash())
+	tee := io.TeeReader(reader, digester.Hash())
 
 
 	nn, err := layerUpload.ReadFrom(tee)
 	nn, err := layerUpload.ReadFrom(tee)
-	compressedReader.Close()
+	reader.Close()
 	if err != nil {
 	if err != nil {
 		return distribution.Descriptor{}, retryOnError(err)
 		return distribution.Descriptor{}, retryOnError(err)
 	}
 	}
@@ -568,8 +571,8 @@ attempts:
 // repository and whether the check shall be done also with digests mapped to different repositories. The
 // repository and whether the check shall be done also with digests mapped to different repositories. The
 // decision is based on layer size. The smaller the layer, the fewer attempts shall be made because the cost
 // decision is based on layer size. The smaller the layer, the fewer attempts shall be made because the cost
 // of upload does not outweigh a latency.
 // of upload does not outweigh a latency.
-func getMaxMountAndExistenceCheckAttempts(layer layer.Layer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) {
-	size, err := layer.DiffSize()
+func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) {
+	size, err := layer.Size()
 	switch {
 	switch {
 	// big blob
 	// big blob
 	case size > middleLayerMaximumSize:
 	case size > middleLayerMaximumSize:

+ 5 - 3
distribution/push_v2_test.go

@@ -387,9 +387,11 @@ func TestLayerAlreadyExists(t *testing.T) {
 		ctx := context.Background()
 		ctx := context.Background()
 		ms := &mockV2MetadataService{}
 		ms := &mockV2MetadataService{}
 		pd := &v2PushDescriptor{
 		pd := &v2PushDescriptor{
-			hmacKey:           []byte(tc.hmacKey),
-			repoInfo:          repoInfo,
-			layer:             layer.EmptyLayer,
+			hmacKey:  []byte(tc.hmacKey),
+			repoInfo: repoInfo,
+			layer: &storeLayer{
+				Layer: layer.EmptyLayer,
+			},
 			repo:              repo,
 			repo:              repo,
 			v2MetadataService: ms,
 			v2MetadataService: ms,
 			pushState:         &pushState{remoteLayers: make(map[layer.DiffID]distribution.Descriptor)},
 			pushState:         &pushState{remoteLayers: make(map[layer.DiffID]distribution.Descriptor)},

+ 30 - 5
distribution/registry.go

@@ -7,6 +7,7 @@ import (
 	"time"
 	"time"
 
 
 	"github.com/docker/distribution"
 	"github.com/docker/distribution"
+	"github.com/docker/distribution/manifest/schema2"
 	distreference "github.com/docker/distribution/reference"
 	distreference "github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/client"
 	"github.com/docker/distribution/registry/client"
 	"github.com/docker/distribution/registry/client/auth"
 	"github.com/docker/distribution/registry/client/auth"
@@ -18,6 +19,34 @@ import (
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
 )
 
 
+// ImageTypes represents the schema2 config types for images
+var ImageTypes = []string{
+	schema2.MediaTypeImageConfig,
+	// Handle unexpected values from https://github.com/docker/distribution/issues/1621
+	"application/octet-stream",
+	// Treat defaulted values as images, newer types cannot be implied
+	"",
+}
+
+// PluginTypes represents the schema2 config types for plugins
+var PluginTypes = []string{
+	schema2.MediaTypePluginConfig,
+}
+
+var mediaTypeClasses map[string]string
+
+func init() {
+	// initialize media type classes with all known types for
+	// plugins
+	mediaTypeClasses = map[string]string{}
+	for _, t := range ImageTypes {
+		mediaTypeClasses[t] = "image"
+	}
+	for _, t := range PluginTypes {
+		mediaTypeClasses[t] = "plugin"
+	}
+}
+
 // NewV2Repository returns a repository (v2 only). It creates an HTTP transport
 // NewV2Repository returns a repository (v2 only). It creates an HTTP transport
 // providing timeout settings and authentication support, and also verifies the
 // providing timeout settings and authentication support, and also verifies the
 // remote API version.
 // remote API version.
@@ -73,11 +102,7 @@ func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, end
 		scope := auth.RepositoryScope{
 		scope := auth.RepositoryScope{
 			Repository: repoName,
 			Repository: repoName,
 			Actions:    actions,
 			Actions:    actions,
-		}
-
-		// Keep image repositories blank for scope compatibility
-		if repoInfo.Class != "image" {
-			scope.Class = repoInfo.Class
+			Class:      repoInfo.Class,
 		}
 		}
 
 
 		creds := registry.NewStaticCredentialStore(authConfig)
 		creds := registry.NewStaticCredentialStore(authConfig)

+ 6 - 3
distribution/registry_unit_test.go

@@ -70,10 +70,13 @@ func testTokenPassThru(t *testing.T, ts *httptest.Server) {
 		Official: false,
 		Official: false,
 	}
 	}
 	imagePullConfig := &ImagePullConfig{
 	imagePullConfig := &ImagePullConfig{
-		MetaHeaders: http.Header{},
-		AuthConfig: &types.AuthConfig{
-			RegistryToken: secretRegistryToken,
+		Config: Config{
+			MetaHeaders: http.Header{},
+			AuthConfig: &types.AuthConfig{
+				RegistryToken: secretRegistryToken,
+			},
 		},
 		},
+		Schema2Types: ImageTypes,
 	}
 	}
 	puller, err := newPuller(endpoint, repoInfo, imagePullConfig)
 	puller, err := newPuller(endpoint, repoInfo, imagePullConfig)
 	if err != nil {
 	if err != nil {

+ 44 - 0
distribution/utils/progress.go

@@ -0,0 +1,44 @@
+package utils
+
+import (
+	"io"
+	"net"
+	"os"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/progress"
+	"github.com/docker/docker/pkg/streamformatter"
+)
+
+// WriteDistributionProgress is a helper for writing progress from chan to JSON
+// stream with an optional cancel function.
+func WriteDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) {
+	progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false)
+	operationCancelled := false
+
+	for prog := range progressChan {
+		if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled {
+			// don't log broken pipe errors as this is the normal case when a client aborts
+			if isBrokenPipe(err) {
+				logrus.Info("Pull session cancelled")
+			} else {
+				logrus.Errorf("error writing progress to client: %v", err)
+			}
+			cancelFunc()
+			operationCancelled = true
+			// Don't return, because we need to continue draining
+			// progressChan until it's closed to avoid a deadlock.
+		}
+	}
+}
+
+func isBrokenPipe(e error) bool {
+	if netErr, ok := e.(*net.OpError); ok {
+		e = netErr.Err
+		if sysErr, ok := netErr.Err.(*os.SyscallError); ok {
+			e = sysErr.Err
+		}
+	}
+	return e == syscall.EPIPE
+}
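To show how this helper is meant to be wired up, here is a minimal sketch modeled on the plugin backend changes later in this diff; the `withProgress` wrapper and its parameters are illustrative assumptions, not part of the change:

```go
package utils

import (
	"io"

	"github.com/docker/docker/pkg/progress"
	"golang.org/x/net/context"
)

// withProgress runs op with a buffered progress channel and cancels the
// operation if the client stops reading the JSON progress stream.
func withProgress(ctx context.Context, out io.Writer, op func(ctx context.Context, po progress.Output) error) error {
	// Buffer so that slow client connections don't stall the transfer.
	progressChan := make(chan progress.Progress, 100)
	writesDone := make(chan struct{})

	ctx, cancel := context.WithCancel(ctx)

	go func() {
		WriteDistributionProgress(cancel, out, progressChan)
		close(writesDone)
	}()

	err := op(ctx, progress.ChanOutput(progressChan))

	// Closing the channel lets WriteDistributionProgress drain and return;
	// then wait for the writer goroutine before returning.
	close(progressChan)
	<-writesDone
	return err
}
```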

+ 15 - 146
docs/extend/index.md

@@ -109,93 +109,6 @@ commands and options, see the
 
 ## Developing a plugin
 
-Currently, there are no CLI commands available to help you develop a plugin.
-This is expected to change in a future release. The manual process for creating
-plugins is described in this section.
-
-### Plugin location and files
-
-Plugins are stored in `/var/lib/docker/plugins`. The `plugins.json` file lists
-each plugin's configuration, and each plugin is stored in a directory with a
-unique identifier.
-
-```bash
-# ls -la /var/lib/docker/plugins
-total 20
-drwx------  4 root root 4096 Aug  8 18:03 .
-drwx--x--x 12 root root 4096 Aug  8 17:53 ..
-drwxr-xr-x  3 root root 4096 Aug  8 17:56 cd851ce43a403
--rw-------  1 root root 2107 Aug  8 18:03 plugins.json
-```
-
-### Format of plugins.json
-
-The `plugins.json` is an inventory of all installed plugins. This example shows
-a `plugins.json` with a single plugin installed.
-
-```json
-# cat plugins.json
-{
-  "cd851ce43a403": {
-    "plugin": {
-      "Config": {
-        "Args": {
-          "Value": null,
-          "Settable": null,
-          "Description": "",
-          "Name": ""
-        },
-        "Env": null,
-        "Devices": null,
-        "Mounts": null,
-        "Capabilities": [
-          "CAP_SYS_ADMIN"
-        ],
-        "Description": "sshFS plugin for Docker",
-        "Documentation": "https://docs.docker.com/engine/extend/plugins/",
-        "Interface": {
-          "Socket": "sshfs.sock",
-          "Types": [
-            "docker.volumedriver/1.0"
-          ]
-        },
-        "Entrypoint": [
-          "/go/bin/docker-volume-sshfs"
-        ],
-        "Workdir": "",
-        "User": {},
-        "Network": {
-          "Type": "host"
-        }
-      },
-      "Config": {
-        "Devices": null,
-        "Args": null,
-        "Env": [],
-        "Mounts": []
-      },
-      "Active": true,
-      "Tag": "latest",
-      "Name": "vieux/sshfs",
-      "Id": "cd851ce43a403"
-    }
-  }
-}
-```
-
-### Contents of a plugin directory
-
-Each directory within `/var/lib/docker/plugins/` contains a `rootfs` directory
-and two JSON files.
-
-```bash
-# ls -la /var/lib/docker/plugins/cd851ce43a403
-total 12
-drwx------ 19 root root 4096 Aug  8 17:56 rootfs
--rw-r--r--  1 root root   50 Aug  8 17:56 plugin-settings.json
--rw-------  1 root root  347 Aug  8 17:56 config.json
-```
-
 #### The rootfs directory
 The `rootfs` directory represents the root filesystem of the plugin. In this
 example, it was created from a Dockerfile:
@@ -206,20 +119,17 @@ plugin's filesystem for docker to communicate with the plugin.
 ```bash
 $ git clone https://github.com/vieux/docker-volume-sshfs
 $ cd docker-volume-sshfs
-$ docker build -t rootfs .
-$ id=$(docker create rootfs true) # id was cd851ce43a403 when the image was created
-$ sudo mkdir -p /var/lib/docker/plugins/$id/rootfs
-$ sudo docker export "$id" | sudo tar -x -C /var/lib/docker/plugins/$id/rootfs
-$ sudo chgrp -R docker /var/lib/docker/plugins/
+$ docker build -t rootfsimage .
+$ id=$(docker create rootfsimage true) # id was cd851ce43a403 when the image was created
+$ sudo mkdir -p myplugin/rootfs
+$ sudo docker export "$id" | sudo tar -x -C myplugin/rootfs
 $ docker rm -vf "$id"
-$ docker rmi rootfs
+$ docker rmi rootfsimage
 ```
 
-#### The config.json and plugin-settings.json files
+#### The config.json file
 
-The `config.json` file describes the plugin. The `plugin-settings.json` file
-contains runtime parameters and is only required if your plugin has runtime
-parameters. [See the Plugins Config reference](config.md).
+The `config.json` file describes the plugin. See the [plugins config reference](config.md).
 
 Consider the following `config.json` file.
 
@@ -242,56 +152,15 @@ Consider the following `config.json` file.
 This plugin is a volume driver. It requires a `host` network and the
 `CAP_SYS_ADMIN` capability. It depends upon the `/go/bin/docker-volume-sshfs`
 entrypoint and uses the `/run/docker/plugins/sshfs.sock` socket to communicate
-with Docker Engine.
-
-
-Consider the following `plugin-settings.json` file.
-
-```json
-{
-  "Devices": null,
-  "Args": null,
-  "Env": [],
-  "Mounts": []
-}
-```
-
-This plugin has no runtime parameters.
-
-Each of these JSON files is included as part of `plugins.json`, as you can see
-by looking back at the example above. After a plugin is installed, `config.json`
-is read-only, but `plugin-settings.json` is read-write, and includes all runtime
-configuration options for the plugin.
+with Docker Engine. This plugin has no runtime parameters.
 
 ### Creating the plugin
 
-Follow these steps to create a plugin:
-
-1. Choose a name for the plugin. Plugin name uses the same format as images,
-   for example: `<repo_name>/<name>`.
-
-2. Create a `rootfs` and export it to `/var/lib/docker/plugins/$id/rootfs`
-   using `docker export`. See [The rootfs directory](#the-rootfs-directory) for
-   an example of creating a `rootfs`.
-
-3. Create a `config.json` file in `/var/lib/docker/plugins/$id/`.
-
-4. Create a `plugin-settings.json` file if needed.
-
-5. Create or add a section to `/var/lib/docker/plugins/plugins.json`. Use
-   `<user>/<name>` as “Name” and `$id` as “Id”.
-
-6. Restart the Docker Engine service.
-
-7. Run `docker plugin ls`.
-    * If your plugin is enabled, you can push it to the
-      registry.
-    * If the plugin is not listed or is disabled, something went wrong.
-      Check the daemon logs for errors.
-
-8. If you are not already logged in, use `docker login` to authenticate against
-   the registry so that you can push to it.
-
-9. Run `docker plugin push <repo_name>/<name>` to push the plugin.
+A new plugin can be created by running
+`docker plugin create <plugin-name> ./path/to/plugin/data`, where the plugin
+data directory contains a plugin configuration file `config.json` and a root
+filesystem in the `rootfs` subdirectory.
 
-The plugin can now be used by any user with access to your registry.
+After that, the plugin `<plugin-name>` shows up in `docker plugin ls`.
+Plugins can be pushed to remote registries with
+`docker plugin push <plugin-name>`.
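For orientation, a rough end-to-end session following these steps might look as follows; the plugin name `example/sample-plugin` and the data directory layout are illustrative assumptions:

```bash
$ ls ./myplugin
config.json  rootfs

$ docker plugin create example/sample-plugin ./myplugin
example/sample-plugin

$ docker plugin enable example/sample-plugin

$ docker plugin push example/sample-plugin
```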

+ 2 - 2
docs/reference/commandline/plugin_create.md

@@ -16,9 +16,9 @@ keywords: "plugin, create"
 # plugin create
 
 ```markdown
-Usage:  docker plugin create [OPTIONS] PLUGIN[:tag] PATH-TO-ROOTFS(rootfs + config.json)
+Usage:  docker plugin create [OPTIONS] PLUGIN PLUGIN-DATA-DIR
 
-Create a plugin from a rootfs and configuration
+Create a plugin from a rootfs and configuration. The plugin data directory must contain a config.json file and a rootfs directory.
 
 Options:
       --compress   Compress the context using gzip
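As a quick, illustrative invocation (the plugin name and data directory are placeholders):

```bash
$ ls ./myplugin-data
config.json  rootfs

$ docker plugin create --compress example/myplugin ./myplugin-data
example/myplugin
```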

+ 4 - 2
docs/reference/commandline/plugin_disable.md

@@ -21,11 +21,13 @@ Usage:  docker plugin disable PLUGIN
 Disable a plugin
 
 Options:
-      --help   Print usage
+  -f, --force   Force the disable of an active plugin
+      --help    Print usage
 ```
 
 Disables a plugin. The plugin must be installed before it can be disabled,
-see [`docker plugin install`](plugin_install.md).
+see [`docker plugin install`](plugin_install.md). Without the `-f` option,
+a plugin that has references (e.g., volumes or networks) cannot be disabled.
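A minimal sketch of the difference, assuming a volume created with the plugin still exists (the plugin name is illustrative):

```bash
# fails while the volume still references the plugin ("... is in use")
$ docker plugin disable example/sample-volume-plugin

# succeeds despite the existing reference
$ docker plugin disable -f example/sample-volume-plugin
```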
 
 
 
 
 The following example shows that the `no-remove` plugin is installed

+ 1 - 2
docs/reference/commandline/plugin_inspect.md

@@ -36,8 +36,7 @@ $ docker plugin inspect tiborvass/no-remove:latest
 ```JSON
 {
   "Id": "8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21",
-  "Name": "tiborvass/no-remove",
-  "Tag": "latest",
+  "Name": "tiborvass/no-remove:latest",
   "Enabled": true,
   "Config": {
     "Mounts": [

+ 1 - 0
docs/reference/commandline/plugin_install.md

@@ -21,6 +21,7 @@ Usage:  docker plugin install [OPTIONS] PLUGIN [KEY=VALUE...]
 Install a plugin
 
 Options:
+      --alias string            Local name for plugin
       --disable                 Do not enable the plugin on install
       --grant-all-permissions   Grant all permissions necessary to run the plugin
       --help                    Print usage
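For example, the new `--alias` flag installs a plugin under a different local name; the alias below is illustrative:

```bash
$ docker plugin install --grant-all-permissions --alias myvol tonistiigi/sample-volume-plugin:latest
```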

+ 2 - 2
integration-cli/docker_cli_authz_plugin_v2_test.go

@@ -11,10 +11,10 @@ import (
 )
 )
 
 
 var (
 var (
-	authzPluginName            = "riyaz/authz-no-volume-plugin"
+	authzPluginName            = "tonistiigi/authz-no-volume-plugin"
 	authzPluginTag             = "latest"
 	authzPluginTag             = "latest"
 	authzPluginNameWithTag     = authzPluginName + ":" + authzPluginTag
 	authzPluginNameWithTag     = authzPluginName + ":" + authzPluginTag
-	authzPluginBadManifestName = "riyaz/authz-plugin-bad-manifest"
+	authzPluginBadManifestName = "tonistiigi/authz-plugin-bad-manifest"
 	nonexistentAuthzPluginName = "riyaz/nonexistent-authz-plugin"
 	nonexistentAuthzPluginName = "riyaz/nonexistent-authz-plugin"
 )
 )
 
 

+ 9 - 2
integration-cli/docker_cli_daemon_plugins_test.go

@@ -290,10 +290,17 @@ func (s *DockerDaemonSuite) TestPluginVolumeRemoveOnRestart(c *check.C) {
 	s.d.Restart("--live-restore=true")
 	s.d.Restart("--live-restore=true")
 
 
 	out, err = s.d.Cmd("plugin", "disable", pName)
 	out, err = s.d.Cmd("plugin", "disable", pName)
-	c.Assert(err, checker.IsNil, check.Commentf(out))
-	out, err = s.d.Cmd("plugin", "rm", pName)
 	c.Assert(err, checker.NotNil, check.Commentf(out))
 	c.Assert(err, checker.NotNil, check.Commentf(out))
 	c.Assert(out, checker.Contains, "in use")
 	c.Assert(out, checker.Contains, "in use")
+
+	out, err = s.d.Cmd("volume", "rm", "test")
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("plugin", "disable", pName)
+	c.Assert(err, checker.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("plugin", "rm", pName)
+	c.Assert(err, checker.IsNil, check.Commentf(out))
 }
 }
 
 
 func existsMountpointWithPrefix(mountpointPrefix string) (bool, error) {
 func existsMountpointWithPrefix(mountpointPrefix string) (bool, error) {

+ 6 - 6
integration-cli/docker_cli_inspect_test.go

@@ -425,20 +425,20 @@ func (s *DockerSuite) TestInspectPlugin(c *check.C) {
 
 
 	out, _, err := dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag)
 	out, _, err := dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag)
 	c.Assert(err, checker.IsNil)
 	c.Assert(err, checker.IsNil)
-	c.Assert(strings.TrimSpace(out), checker.Equals, pName)
+	c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
 
 
 	out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag)
 	out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag)
 	c.Assert(err, checker.IsNil)
 	c.Assert(err, checker.IsNil)
-	c.Assert(strings.TrimSpace(out), checker.Equals, pName)
+	c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
 
 
 	// Even without tag the inspect still work
 	// Even without tag the inspect still work
-	out, _, err = dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pName)
+	out, _, err = dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag)
 	c.Assert(err, checker.IsNil)
 	c.Assert(err, checker.IsNil)
-	c.Assert(strings.TrimSpace(out), checker.Equals, pName)
+	c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
 
 
-	out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pName)
+	out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag)
 	c.Assert(err, checker.IsNil)
 	c.Assert(err, checker.IsNil)
-	c.Assert(strings.TrimSpace(out), checker.Equals, pName)
+	c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag)
 
 
 	_, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag)
 	_, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag)
 	c.Assert(err, checker.IsNil)
 	c.Assert(err, checker.IsNil)

+ 1 - 1
integration-cli/docker_cli_network_unix_test.go

@@ -772,7 +772,7 @@ func (s *DockerNetworkSuite) TestDockerPluginV2NetworkDriver(c *check.C) {
 	testRequires(c, DaemonIsLinux, IsAmd64, Network)
 	testRequires(c, DaemonIsLinux, IsAmd64, Network)
 
 
 	var (
 	var (
-		npName        = "tiborvass/test-docker-netplugin"
+		npName        = "tonistiigi/test-docker-netplugin"
 		npTag         = "latest"
 		npTag         = "latest"
 		npNameWithTag = npName + ":" + npTag
 		npNameWithTag = npName + ":" + npTag
 	)
 	)

+ 94 - 15
integration-cli/docker_cli_plugins_test.go

@@ -1,6 +1,9 @@
 package main
 package main
 
 
 import (
 import (
+	"fmt"
+	"os/exec"
+
 	"github.com/docker/docker/pkg/integration/checker"
 	"github.com/docker/docker/pkg/integration/checker"
 	"github.com/go-check/check"
 	"github.com/go-check/check"
 
 
@@ -12,7 +15,7 @@ import (
 
 
 var (
 var (
 	pluginProcessName = "sample-volume-plugin"
 	pluginProcessName = "sample-volume-plugin"
-	pName             = "tiborvass/sample-volume-plugin"
+	pName             = "tonistiigi/sample-volume-plugin"
 	pTag              = "latest"
 	pTag              = "latest"
 	pNameWithTag      = pName + ":" + pTag
 	pNameWithTag      = pName + ":" + pTag
 )
 )
@@ -64,23 +67,18 @@ func (s *DockerSuite) TestPluginForceRemove(c *check.C) {
 
 
 func (s *DockerSuite) TestPluginActive(c *check.C) {
 func (s *DockerSuite) TestPluginActive(c *check.C) {
 	testRequires(c, DaemonIsLinux, IsAmd64, Network)
 	testRequires(c, DaemonIsLinux, IsAmd64, Network)
-	out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag)
+	_, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag)
 	c.Assert(err, checker.IsNil)
 	c.Assert(err, checker.IsNil)
 
 
-	out, _, err = dockerCmdWithError("volume", "create", "-d", pNameWithTag)
+	_, _, err = dockerCmdWithError("volume", "create", "-d", pNameWithTag, "--name", "testvol1")
 	c.Assert(err, checker.IsNil)
 	c.Assert(err, checker.IsNil)
 
 
-	vID := strings.TrimSpace(out)
+	out, _, err := dockerCmdWithError("plugin", "disable", pNameWithTag)
+	c.Assert(out, checker.Contains, "in use")
 
 
-	out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag)
-	c.Assert(out, checker.Contains, "is in use")
-
-	_, _, err = dockerCmdWithError("volume", "rm", vID)
+	_, _, err = dockerCmdWithError("volume", "rm", "testvol1")
 	c.Assert(err, checker.IsNil)
 	c.Assert(err, checker.IsNil)
 
 
-	out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag)
-	c.Assert(out, checker.Contains, "is enabled")
-
 	_, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag)
 	_, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag)
 	c.Assert(err, checker.IsNil)
 	c.Assert(err, checker.IsNil)
 
 
@@ -144,11 +142,18 @@ func (s *DockerSuite) TestPluginInstallArgs(c *check.C) {
 	c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]")
 	c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]")
 }
 }
 
 
-func (s *DockerSuite) TestPluginInstallImage(c *check.C) {
-	testRequires(c, DaemonIsLinux, IsAmd64, Network)
-	out, _, err := dockerCmdWithError("plugin", "install", "redis")
+func (s *DockerRegistrySuite) TestPluginInstallImage(c *check.C) {
+	testRequires(c, DaemonIsLinux, IsAmd64)
+
+	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
+	// tag the image to upload it to the private registry
+	dockerCmd(c, "tag", "busybox", repoName)
+	// push the image to the registry
+	dockerCmd(c, "push", repoName)
+
+	out, _, err := dockerCmdWithError("plugin", "install", repoName)
 	c.Assert(err, checker.NotNil)
 	c.Assert(err, checker.NotNil)
-	c.Assert(out, checker.Contains, "content is not a plugin")
+	c.Assert(out, checker.Contains, "target is image")
 }
 }
 
 
 func (s *DockerSuite) TestPluginEnableDisableNegative(c *check.C) {
 func (s *DockerSuite) TestPluginEnableDisableNegative(c *check.C) {
@@ -184,6 +189,9 @@ func (s *DockerSuite) TestPluginCreate(c *check.C) {
 	err = ioutil.WriteFile(filepath.Join(temp, "config.json"), []byte(data), 0644)
 	err = ioutil.WriteFile(filepath.Join(temp, "config.json"), []byte(data), 0644)
 	c.Assert(err, checker.IsNil)
 	c.Assert(err, checker.IsNil)
 
 
+	err = os.MkdirAll(filepath.Join(temp, "rootfs"), 0700)
+	c.Assert(err, checker.IsNil)
+
 	out, _, err := dockerCmdWithError("plugin", "create", name, temp)
 	out, _, err := dockerCmdWithError("plugin", "create", name, temp)
 	c.Assert(err, checker.IsNil)
 	c.Assert(err, checker.IsNil)
 	c.Assert(out, checker.Contains, name)
 	c.Assert(out, checker.Contains, name)
@@ -251,3 +259,74 @@ func (s *DockerSuite) TestPluginInspect(c *check.C) {
 	_, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id[:5])
 	_, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id[:5])
 	c.Assert(err, checker.NotNil)
 	c.Assert(err, checker.NotNil)
 }
 }
+
+// Test case for https://github.com/docker/docker/pull/29186#discussion_r91277345
+func (s *DockerSuite) TestPluginInspectOnWindows(c *check.C) {
+	// This test should work on Windows only
+	testRequires(c, DaemonIsWindows)
+
+	out, _, err := dockerCmdWithError("plugin", "inspect", "foobar")
+	c.Assert(err, checker.NotNil)
+	c.Assert(out, checker.Contains, "plugins are not supported on this platform")
+	c.Assert(err.Error(), checker.Contains, "plugins are not supported on this platform")
+}
+
+func (s *DockerTrustSuite) TestPluginTrustedInstall(c *check.C) {
+	testRequires(c, DaemonIsLinux, IsAmd64, Network)
+
+	trustedName := s.setupTrustedplugin(c, pNameWithTag, "trusted-plugin-install")
+
+	installCmd := exec.Command(dockerBinary, "plugin", "install", "--grant-all-permissions", trustedName)
+	s.trustedCmd(installCmd)
+	out, _, err := runCommandWithOutput(installCmd)
+
+	c.Assert(strings.TrimSpace(out), checker.Contains, trustedName)
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Contains, trustedName)
+
+	out, _, err = dockerCmdWithError("plugin", "ls")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "true")
+
+	out, _, err = dockerCmdWithError("plugin", "disable", trustedName)
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Contains, trustedName)
+
+	out, _, err = dockerCmdWithError("plugin", "enable", trustedName)
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Contains, trustedName)
+
+	out, _, err = dockerCmdWithError("plugin", "rm", "-f", trustedName)
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Contains, trustedName)
+
+	// Try untrusted pull to ensure we pushed the tag to the registry
+	installCmd = exec.Command(dockerBinary, "plugin", "install", "--disable-content-trust=true", "--grant-all-permissions", trustedName)
+	s.trustedCmd(installCmd)
+	out, _, err = runCommandWithOutput(installCmd)
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out))
+
+	out, _, err = dockerCmdWithError("plugin", "ls")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "true")
+
+}
+
+func (s *DockerTrustSuite) TestPluginUntrustedInstall(c *check.C) {
+	testRequires(c, DaemonIsLinux, IsAmd64, Network)
+
+	pluginName := fmt.Sprintf("%v/dockercliuntrusted/plugintest:latest", privateRegistryURL)
+	// install locally and push to private registry
+	dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", pluginName, pNameWithTag)
+	dockerCmd(c, "plugin", "push", pluginName)
+	dockerCmd(c, "plugin", "rm", "-f", pluginName)
+
+	// Try trusted install on untrusted plugin
+	installCmd := exec.Command(dockerBinary, "plugin", "install", "--grant-all-permissions", pluginName)
+	s.trustedCmd(installCmd)
+	out, _, err := runCommandWithOutput(installCmd)
+
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out))
+}

+ 2 - 2
integration-cli/docker_utils.go

@@ -30,7 +30,7 @@ import (
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/stringutils"
 	"github.com/docker/docker/pkg/stringutils"
 	"github.com/docker/go-connections/tlsconfig"
 	"github.com/docker/go-connections/tlsconfig"
-	"github.com/docker/go-units"
+	units "github.com/docker/go-units"
 	"github.com/go-check/check"
 	"github.com/go-check/check"
 )
 )
 
 
@@ -310,7 +310,7 @@ func deleteAllPlugins() error {
 	}
 	}
 	var errors []string
 	var errors []string
 	for _, p := range plugins {
 	for _, p := range plugins {
-		status, b, err := sockRequest("DELETE", "/plugins/"+p.Name+":"+p.Tag+"?force=1", nil)
+		status, b, err := sockRequest("DELETE", "/plugins/"+p.Name+"?force=1", nil)
 		if err != nil {
 		if err != nil {
 			errors = append(errors, err.Error())
 			errors = append(errors, err.Error())
 			continue
 			continue

+ 23 - 0
integration-cli/trust_server.go

@@ -211,6 +211,29 @@ func (s *DockerTrustSuite) setupTrustedImage(c *check.C, name string) string {
 	return repoName
 	return repoName
 }
 }
 
 
+func (s *DockerTrustSuite) setupTrustedplugin(c *check.C, source, name string) string {
+	repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name)
+	// tag the image and upload it to the private registry
+	dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", repoName, source)
+
+	pushCmd := exec.Command(dockerBinary, "plugin", "push", repoName)
+	s.trustedCmd(pushCmd)
+	out, _, err := runCommandWithOutput(pushCmd)
+
+	if err != nil {
+		c.Fatalf("Error running trusted plugin push: %s\n%s", err, out)
+	}
+	if !strings.Contains(string(out), "Signing and pushing trust metadata") {
+		c.Fatalf("Missing expected output on trusted push:\n%s", out)
+	}
+
+	if out, status := dockerCmd(c, "plugin", "rm", "-f", repoName); status != 0 {
+		c.Fatalf("Error removing plugin %q\n%s", repoName, out)
+	}
+
+	return repoName
+}
+
 func notaryClientEnv(cmd *exec.Cmd) {
 func notaryClientEnv(cmd *exec.Cmd) {
 	pwd := "12345678"
 	pwd := "12345678"
 	env := []string{
 	env := []string{

+ 11 - 0
pkg/progress/progress.go

@@ -44,6 +44,17 @@ func ChanOutput(progressChan chan<- Progress) Output {
 	return chanOutput(progressChan)
 }
 
+type discardOutput struct{}
+
+func (discardOutput) WriteProgress(Progress) error {
+	return nil
+}
+
+// DiscardOutput returns an Output that discards progress
+func DiscardOutput() Output {
+	return discardOutput{}
+}
+
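A tiny, self-contained sketch of the intended use; the plugin backend passes this sink when a caller supplies no output stream (the `main` wrapper is only for illustration):

```go
package main

import "github.com/docker/docker/pkg/progress"

func main() {
	// DiscardOutput satisfies progress.Output but drops every update.
	po := progress.DiscardOutput()
	_ = po.WriteProgress(progress.Progress{ID: "layer", Action: "Downloading"})
}
```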
 // Update is a convenience function to write a progress update to the channel.
 func Update(out Output, id, action string) {
 	out.WriteProgress(Progress{ID: id, Action: action})

+ 469 - 161
plugin/backend_linux.go

@@ -3,37 +3,39 @@
 package plugin
 package plugin
 
 
 import (
 import (
-	"bytes"
+	"archive/tar"
+	"compress/gzip"
 	"encoding/json"
 	"encoding/json"
 	"fmt"
 	"fmt"
 	"io"
 	"io"
 	"io/ioutil"
 	"io/ioutil"
 	"net/http"
 	"net/http"
 	"os"
 	"os"
+	"path"
 	"path/filepath"
 	"path/filepath"
-	"reflect"
-	"regexp"
+	"strings"
 
 
 	"github.com/Sirupsen/logrus"
 	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest/schema2"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/distribution"
+	progressutils "github.com/docker/docker/distribution/utils"
+	"github.com/docker/docker/distribution/xfer"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/pkg/stringid"
-	"github.com/docker/docker/plugin/distribution"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/plugin/v2"
 	"github.com/docker/docker/plugin/v2"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/reference"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
 )
 
 
-var (
-	validFullID    = regexp.MustCompile(`^([a-f0-9]{64})$`)
-	validPartialID = regexp.MustCompile(`^([a-f0-9]{1,64})$`)
-)
-
-// Disable deactivates a plugin, which implies that they cannot be used by containers.
-func (pm *Manager) Disable(name string) error {
-	p, err := pm.pluginStore.GetByName(name)
+// Disable deactivates a plugin. This means resources (volumes, networks) can't use it.
+func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error {
+	p, err := pm.config.Store.GetV2Plugin(refOrID)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -41,16 +43,20 @@ func (pm *Manager) Disable(name string) error {
 	c := pm.cMap[p]
 	c := pm.cMap[p]
 	pm.mu.RUnlock()
 	pm.mu.RUnlock()
 
 
+	if !config.ForceDisable && p.GetRefCount() > 0 {
+		return fmt.Errorf("plugin %s is in use", p.Name())
+	}
+
 	if err := pm.disable(p, c); err != nil {
 	if err := pm.disable(p, c); err != nil {
 		return err
 		return err
 	}
 	}
-	pm.pluginEventLogger(p.GetID(), name, "disable")
+	pm.config.LogPluginEvent(p.GetID(), refOrID, "disable")
 	return nil
 	return nil
 }
 }
 
 
 // Enable activates a plugin, which implies that they are ready to be used by containers.
 // Enable activates a plugin, which implies that they are ready to be used by containers.
-func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error {
-	p, err := pm.pluginStore.GetByName(name)
+func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error {
+	p, err := pm.config.Store.GetV2Plugin(refOrID)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -59,71 +65,74 @@ func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error {
 	if err := pm.enable(p, c, false); err != nil {
 	if err := pm.enable(p, c, false); err != nil {
 		return err
 		return err
 	}
 	}
-	pm.pluginEventLogger(p.GetID(), name, "enable")
+	pm.config.LogPluginEvent(p.GetID(), refOrID, "enable")
 	return nil
 	return nil
 }
 }
 
 
 // Inspect examines a plugin config
 // Inspect examines a plugin config
-func (pm *Manager) Inspect(refOrID string) (tp types.Plugin, err error) {
-	// Match on full ID
-	if validFullID.MatchString(refOrID) {
-		p, err := pm.pluginStore.GetByID(refOrID)
-		if err == nil {
-			return p.PluginObj, nil
-		}
+func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) {
+	p, err := pm.config.Store.GetV2Plugin(refOrID)
+	if err != nil {
+		return nil, err
 	}
 	}
 
 
-	// Match on full name
-	if pluginName, err := getPluginName(refOrID); err == nil {
-		if p, err := pm.pluginStore.GetByName(pluginName); err == nil {
-			return p.PluginObj, nil
-		}
-	}
+	return &p.PluginObj, nil
+}
 
 
-	// Match on partial ID
-	if validPartialID.MatchString(refOrID) {
-		p, err := pm.pluginStore.Search(refOrID)
-		if err == nil {
-			return p.PluginObj, nil
-		}
-		return tp, err
+func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error {
+	if outStream != nil {
+		// Include a buffer so that slow client connections don't affect
+		// transfer performance.
+		progressChan := make(chan progress.Progress, 100)
+
+		writesDone := make(chan struct{})
+
+		defer func() {
+			close(progressChan)
+			<-writesDone
+		}()
+
+		var cancelFunc context.CancelFunc
+		ctx, cancelFunc = context.WithCancel(ctx)
+
+		go func() {
+			progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
+			close(writesDone)
+		}()
+
+		config.ProgressOutput = progress.ChanOutput(progressChan)
+	} else {
+		config.ProgressOutput = progress.DiscardOutput()
 	}
 	}
+	return distribution.Pull(ctx, ref, config)
+}
 
 
-	return tp, fmt.Errorf("no such plugin name or ID associated with %q", refOrID)
+type tempConfigStore struct {
+	config       []byte
+	configDigest digest.Digest
 }
 }
 
 
-func (pm *Manager) pull(name string, metaHeader http.Header, authConfig *types.AuthConfig) (reference.Named, distribution.PullData, error) {
-	ref, err := distribution.GetRef(name)
-	if err != nil {
-		logrus.Debugf("error in distribution.GetRef: %v", err)
-		return nil, nil, err
-	}
-	name = ref.String()
+func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) {
+	dgst := digest.FromBytes(c)
 
 
-	if p, _ := pm.pluginStore.GetByName(name); p != nil {
-		logrus.Debug("plugin already exists")
-		return nil, nil, fmt.Errorf("%s exists", name)
-	}
+	s.config = c
+	s.configDigest = dgst
 
 
-	pd, err := distribution.Pull(ref, pm.registryService, metaHeader, authConfig)
-	if err != nil {
-		logrus.Debugf("error in distribution.Pull(): %v", err)
-		return nil, nil, err
-	}
-	return ref, pd, nil
+	return dgst, nil
 }
 }
 
 
-func computePrivileges(pd distribution.PullData) (types.PluginPrivileges, error) {
-	config, err := pd.Config()
-	if err != nil {
-		return nil, err
+func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) {
+	if d != s.configDigest {
+		return nil, digest.ErrDigestNotFound
 	}
 	}
+	return s.config, nil
+}
 
 
-	var c types.PluginConfig
-	if err := json.Unmarshal(config, &c); err != nil {
-		return nil, err
-	}
+func (s *tempConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) {
+	return configToRootFS(c)
+}
 
 
+func computePrivileges(c types.PluginConfig) (types.PluginPrivileges, error) {
 	var privileges types.PluginPrivileges
 	var privileges types.PluginPrivileges
 	if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" {
 	if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" {
 		privileges = append(privileges, types.PluginPrivilege{
 		privileges = append(privileges, types.PluginPrivilege{
@@ -169,67 +178,89 @@ func computePrivileges(pd distribution.PullData) (types.PluginPrivileges, error)
 }
 }
 
 
 // Privileges pulls a plugin config and computes the privileges required to install it.
 // Privileges pulls a plugin config and computes the privileges required to install it.
-func (pm *Manager) Privileges(name string, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
-	_, pd, err := pm.pull(name, metaHeader, authConfig)
-	if err != nil {
+func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
+	// create image store instance
+	cs := &tempConfigStore{}
+
+	// DownloadManager not defined because only pulling configuration.
+	pluginPullConfig := &distribution.ImagePullConfig{
+		Config: distribution.Config{
+			MetaHeaders:      metaHeader,
+			AuthConfig:       authConfig,
+			RegistryService:  pm.config.RegistryService,
+			ImageEventLogger: func(string, string, string) {},
+			ImageStore:       cs,
+		},
+		Schema2Types: distribution.PluginTypes,
+	}
+
+	if err := pm.pull(ctx, ref, pluginPullConfig, nil); err != nil {
+		return nil, err
+	}
+
+	if cs.config == nil {
+		return nil, errors.New("no configuration pulled")
+	}
+	var config types.PluginConfig
+	if err := json.Unmarshal(cs.config, &config); err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	return computePrivileges(pd)
+
+	return computePrivileges(config)
 }
 }
 
 
 // Pull pulls a plugin, check if the correct privileges are provided and install the plugin.
 // Pull pulls a plugin, check if the correct privileges are provided and install the plugin.
-func (pm *Manager) Pull(name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges) (err error) {
-	ref, pd, err := pm.pull(name, metaHeader, authConfig)
+func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) {
+	pm.muGC.RLock()
+	defer pm.muGC.RUnlock()
+
+	// revalidate because Pull is public
+	nameref, err := reference.ParseNamed(name)
 	if err != nil {
 	if err != nil {
-		return err
+		return errors.Wrapf(err, "failed to parse %q", name)
 	}
 	}
+	name = reference.WithDefaultTag(nameref).String()
 
 
-	requiredPrivileges, err := computePrivileges(pd)
-	if err != nil {
+	if err := pm.config.Store.validateName(name); err != nil {
 		return err
 		return err
 	}
 	}
 
 
-	if !reflect.DeepEqual(privileges, requiredPrivileges) {
-		return errors.New("incorrect privileges")
-	}
+	tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
+	defer os.RemoveAll(tmpRootFSDir)
 
 
-	pluginID := stringid.GenerateNonCryptoID()
-	pluginDir := filepath.Join(pm.libRoot, pluginID)
-	if err := os.MkdirAll(pluginDir, 0755); err != nil {
-		logrus.Debugf("error in MkdirAll: %v", err)
-		return err
+	dm := &downloadManager{
+		tmpDir:    tmpRootFSDir,
+		blobStore: pm.blobStore,
 	}
 	}
 
 
-	defer func() {
-		if err != nil {
-			if delErr := os.RemoveAll(pluginDir); delErr != nil {
-				logrus.Warnf("unable to remove %q from failed plugin pull: %v", pluginDir, delErr)
-			}
-		}
-	}()
+	pluginPullConfig := &distribution.ImagePullConfig{
+		Config: distribution.Config{
+			MetaHeaders:      metaHeader,
+			AuthConfig:       authConfig,
+			RegistryService:  pm.config.RegistryService,
+			ImageEventLogger: pm.config.LogPluginEvent,
+			ImageStore:       dm,
+		},
+		DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead
+		Schema2Types:    distribution.PluginTypes,
+	}
 
 
-	err = distribution.WritePullData(pd, filepath.Join(pm.libRoot, pluginID), true)
+	err = pm.pull(ctx, ref, pluginPullConfig, outStream)
 	if err != nil {
 	if err != nil {
-		logrus.Debugf("error in distribution.WritePullData(): %v", err)
+		go pm.GC()
 		return err
 		return err
 	}
 	}
 
 
-	tag := distribution.GetTag(ref)
-	p := v2.NewPlugin(ref.Name(), pluginID, pm.runRoot, pm.libRoot, tag)
-	err = p.InitPlugin()
-	if err != nil {
+	if _, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges); err != nil {
 		return err
 		return err
 	}
 	}
-	pm.pluginStore.Add(p)
-
-	pm.pluginEventLogger(pluginID, ref.String(), "pull")
 
 
 	return nil
 	return nil
 }
 }
 
 
 // List displays the list of plugins and associated metadata.
 // List displays the list of plugins and associated metadata.
 func (pm *Manager) List() ([]types.Plugin, error) {
 func (pm *Manager) List() ([]types.Plugin, error) {
-	plugins := pm.pluginStore.GetAll()
+	plugins := pm.config.Store.GetAll()
 	out := make([]types.Plugin, 0, len(plugins))
 	out := make([]types.Plugin, 0, len(plugins))
 	for _, p := range plugins {
 	for _, p := range plugins {
 		out = append(out, p.PluginObj)
 		out = append(out, p.PluginObj)
@@ -238,38 +269,211 @@ func (pm *Manager) List() ([]types.Plugin, error) {
 }
 }
 
 
 // Push pushes a plugin to the store.
 // Push pushes a plugin to the store.
-func (pm *Manager) Push(name string, metaHeader http.Header, authConfig *types.AuthConfig) error {
-	p, err := pm.pluginStore.GetByName(name)
+func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error {
+	p, err := pm.config.Store.GetV2Plugin(name)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	dest := filepath.Join(pm.libRoot, p.GetID())
-	config, err := ioutil.ReadFile(filepath.Join(dest, "config.json"))
+
+	ref, err := reference.ParseNamed(p.Name())
 	if err != nil {
 	if err != nil {
-		return err
+		return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name())
 	}
 	}
 
 
-	var dummy types.Plugin
-	err = json.Unmarshal(config, &dummy)
-	if err != nil {
-		return err
+	var po progress.Output
+	if outStream != nil {
+		// Include a buffer so that slow client connections don't affect
+		// transfer performance.
+		progressChan := make(chan progress.Progress, 100)
+
+		writesDone := make(chan struct{})
+
+		defer func() {
+			close(progressChan)
+			<-writesDone
+		}()
+
+		var cancelFunc context.CancelFunc
+		ctx, cancelFunc = context.WithCancel(ctx)
+
+		go func() {
+			progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
+			close(writesDone)
+		}()
+
+		po = progress.ChanOutput(progressChan)
+	} else {
+		po = progress.DiscardOutput()
+	}
+
+	// TODO: replace these with manager
+	is := &pluginConfigStore{
+		pm:     pm,
+		plugin: p,
+	}
+	ls := &pluginLayerProvider{
+		pm:     pm,
+		plugin: p,
+	}
+	rs := &pluginReference{
+		name:     ref,
+		pluginID: p.Config,
+	}
+
+	uploadManager := xfer.NewLayerUploadManager(3)
+
+	imagePushConfig := &distribution.ImagePushConfig{
+		Config: distribution.Config{
+			MetaHeaders:      metaHeader,
+			AuthConfig:       authConfig,
+			ProgressOutput:   po,
+			RegistryService:  pm.config.RegistryService,
+			ReferenceStore:   rs,
+			ImageEventLogger: pm.config.LogPluginEvent,
+			ImageStore:       is,
+			RequireSchema2:   true,
+		},
+		ConfigMediaType: schema2.MediaTypePluginConfig,
+		LayerStore:      ls,
+		UploadManager:   uploadManager,
+	}
+
+	return distribution.Push(ctx, ref, imagePushConfig)
+}
+
+type pluginReference struct {
+	name     reference.Named
+	pluginID digest.Digest
+}
+
+func (r *pluginReference) References(id digest.Digest) []reference.Named {
+	if r.pluginID != id {
+		return nil
 	}
 	}
+	return []reference.Named{r.name}
+}
 
 
-	rootfs, err := archive.Tar(p.Rootfs, archive.Gzip)
+func (r *pluginReference) ReferencesByName(ref reference.Named) []reference.Association {
+	return []reference.Association{
+		{
+			Ref: r.name,
+			ID:  r.pluginID,
+		},
+	}
+}
+
+func (r *pluginReference) Get(ref reference.Named) (digest.Digest, error) {
+	if r.name.String() != ref.String() {
+		return digest.Digest(""), reference.ErrDoesNotExist
+	}
+	return r.pluginID, nil
+}
+
+func (r *pluginReference) AddTag(ref reference.Named, id digest.Digest, force bool) error {
+	// Read only, ignore
+	return nil
+}
+func (r *pluginReference) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error {
+	// Read only, ignore
+	return nil
+}
+func (r *pluginReference) Delete(ref reference.Named) (bool, error) {
+	// Read only, ignore
+	return false, nil
+}
+
+type pluginConfigStore struct {
+	pm     *Manager
+	plugin *v2.Plugin
+}
+
+func (s *pluginConfigStore) Put([]byte) (digest.Digest, error) {
+	return digest.Digest(""), errors.New("cannot store config on push")
+}
+
+func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) {
+	if s.plugin.Config != d {
+		return nil, errors.New("plugin not found")
+	}
+	rwc, err := s.pm.blobStore.Get(d)
 	if err != nil {
 	if err != nil {
-		return err
+		return nil, err
+	}
+	defer rwc.Close()
+	return ioutil.ReadAll(rwc)
+}
+
+func (s *pluginConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) {
+	return configToRootFS(c)
+}
+
+type pluginLayerProvider struct {
+	pm     *Manager
+	plugin *v2.Plugin
+}
+
+func (p *pluginLayerProvider) Get(id layer.ChainID) (distribution.PushLayer, error) {
+	rootFS := rootFSFromPlugin(p.plugin.PluginObj.Config.Rootfs)
+	var i int
+	for i = 1; i <= len(rootFS.DiffIDs); i++ {
+		if layer.CreateChainID(rootFS.DiffIDs[:i]) == id {
+			break
+		}
+	}
+	if i > len(rootFS.DiffIDs) {
+		return nil, errors.New("layer not found")
+	}
+	return &pluginLayer{
+		pm:      p.pm,
+		diffIDs: rootFS.DiffIDs[:i],
+		blobs:   p.plugin.Blobsums[:i],
+	}, nil
+}
+
+type pluginLayer struct {
+	pm      *Manager
+	diffIDs []layer.DiffID
+	blobs   []digest.Digest
+}
+
+func (l *pluginLayer) ChainID() layer.ChainID {
+	return layer.CreateChainID(l.diffIDs)
+}
+
+func (l *pluginLayer) DiffID() layer.DiffID {
+	return l.diffIDs[len(l.diffIDs)-1]
+}
+
+func (l *pluginLayer) Parent() distribution.PushLayer {
+	if len(l.diffIDs) == 1 {
+		return nil
+	}
+	return &pluginLayer{
+		pm:      l.pm,
+		diffIDs: l.diffIDs[:len(l.diffIDs)-1],
+		blobs:   l.blobs[:len(l.diffIDs)-1],
 	}
 	}
-	defer rootfs.Close()
+}
 
 
-	_, err = distribution.Push(name, pm.registryService, metaHeader, authConfig, ioutil.NopCloser(bytes.NewReader(config)), rootfs)
-	// XXX: Ignore returning digest for now.
-	// Since digest needs to be written to the ProgressWriter.
-	return err
+func (l *pluginLayer) Open() (io.ReadCloser, error) {
+	return l.pm.blobStore.Get(l.blobs[len(l.diffIDs)-1])
+}
+
+func (l *pluginLayer) Size() (int64, error) {
+	return l.pm.blobStore.Size(l.blobs[len(l.diffIDs)-1])
+}
+
+func (l *pluginLayer) MediaType() string {
+	return schema2.MediaTypeLayer
+}
+
+func (l *pluginLayer) Release() {
+	// Nothing needs to be released, no references held
 }
 }
 
 
 // Remove deletes plugin's root directory.
 // Remove deletes plugin's root directory.
-func (pm *Manager) Remove(name string, config *types.PluginRmConfig) (err error) {
-	p, err := pm.pluginStore.GetByName(name)
+func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
+	p, err := pm.config.Store.GetV2Plugin(name)
 	pm.mu.RLock()
 	pm.mu.RLock()
 	c := pm.cMap[p]
 	c := pm.cMap[p]
 	pm.mu.RUnlock()
 	pm.mu.RUnlock()
@@ -293,90 +497,194 @@ func (pm *Manager) Remove(name string, config *types.PluginRmConfig) (err error)
 		}
 		}
 	}
 	}
 
 
-	id := p.GetID()
-	pluginDir := filepath.Join(pm.libRoot, id)
-
 	defer func() {
 	defer func() {
-		if err == nil || config.ForceRemove {
-			pm.pluginStore.Remove(p)
-			pm.pluginEventLogger(id, name, "remove")
-		}
+		go pm.GC()
 	}()
 	}()
 
 
-	if err = os.RemoveAll(pluginDir); err != nil {
-		return errors.Wrap(err, "failed to remove plugin directory")
+	id := p.GetID()
+	pm.config.Store.Remove(p)
+	pluginDir := filepath.Join(pm.config.Root, id)
+	if err := os.RemoveAll(pluginDir); err != nil {
+		logrus.Warnf("unable to remove %q from plugin remove: %v", pluginDir, err)
 	}
 	}
+	pm.config.LogPluginEvent(id, name, "remove")
 	return nil
 	return nil
 }
 }
 
 
 // Set sets plugin args
 // Set sets plugin args
 func (pm *Manager) Set(name string, args []string) error {
 func (pm *Manager) Set(name string, args []string) error {
-	p, err := pm.pluginStore.GetByName(name)
+	p, err := pm.config.Store.GetV2Plugin(name)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	return p.Set(args)
+	if err := p.Set(args); err != nil {
+		return err
+	}
+	return pm.save(p)
 }
 }
 
 
 // CreateFromContext creates a plugin from the given pluginDir which contains
 // CreateFromContext creates a plugin from the given pluginDir which contains
 // both the rootfs and the config.json and a repoName with optional tag.
 // both the rootfs and the config.json and a repoName with optional tag.
-func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.Reader, options *types.PluginCreateOptions) error {
-	pluginID := stringid.GenerateNonCryptoID()
+func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) {
+	pm.muGC.RLock()
+	defer pm.muGC.RUnlock()
+
+	ref, err := reference.ParseNamed(options.RepoName)
+	if err != nil {
+		return errors.Wrapf(err, "failed to parse reference %v", options.RepoName)
+	}
+	if _, ok := ref.(reference.Canonical); ok {
+		return errors.Errorf("canonical references are not permitted")
+	}
+	name := reference.WithDefaultTag(ref).String()
 
 
-	pluginDir := filepath.Join(pm.libRoot, pluginID)
-	if err := os.MkdirAll(pluginDir, 0755); err != nil {
+	if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin()
 		return err
 		return err
 	}
 	}
 
 
-	// In case an error happens, remove the created directory.
-	if err := pm.createFromContext(ctx, pluginID, pluginDir, tarCtx, options); err != nil {
-		if err := os.RemoveAll(pluginDir); err != nil {
-			logrus.Warnf("unable to remove %q from failed plugin creation: %v", pluginDir, err)
-		}
+	tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
+	defer os.RemoveAll(tmpRootFSDir)
+	if err != nil {
+		return errors.Wrap(err, "failed to create temp directory")
+	}
+	var configJSON []byte
+	rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON)
+
+	rootFSBlob, err := pm.blobStore.New()
+	if err != nil {
 		return err
 		return err
 	}
 	}
+	defer rootFSBlob.Close()
+	gzw := gzip.NewWriter(rootFSBlob)
+	layerDigester := digest.Canonical.New()
+	rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash()))
 
 
-	return nil
-}
+	if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil {
+		return err
+	}
+	if err := rootFS.Close(); err != nil {
+		return err
+	}
+
+	if configJSON == nil {
+		return errors.New("config not found")
+	}
 
 
-func (pm *Manager) createFromContext(ctx context.Context, pluginID, pluginDir string, tarCtx io.Reader, options *types.PluginCreateOptions) error {
-	if err := chrootarchive.Untar(tarCtx, pluginDir, nil); err != nil {
+	if err := gzw.Close(); err != nil {
+		return errors.Wrap(err, "error closing gzip writer")
+	}
+
+	var config types.PluginConfig
+	if err := json.Unmarshal(configJSON, &config); err != nil {
+		return errors.Wrap(err, "failed to parse config")
+	}
+
+	if err := pm.validateConfig(config); err != nil {
 		return err
 		return err
 	}
 	}
 
 
-	repoName := options.RepoName
-	ref, err := distribution.GetRef(repoName)
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+
+	rootFSBlobsum, err := rootFSBlob.Commit()
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
-	name := ref.Name()
-	tag := distribution.GetTag(ref)
+	defer func() {
+		if err != nil {
+			go pm.GC()
+		}
+	}()
+
+	config.Rootfs = &types.PluginConfigRootfs{
+		Type:    "layers",
+		DiffIds: []string{layerDigester.Digest().String()},
+	}
 
 
-	p := v2.NewPlugin(name, pluginID, pm.runRoot, pm.libRoot, tag)
-	if err := p.InitPlugin(); err != nil {
+	configBlob, err := pm.blobStore.New()
+	if err != nil {
+		return err
+	}
+	defer configBlob.Close()
+	if err := json.NewEncoder(configBlob).Encode(config); err != nil {
+		return errors.Wrap(err, "error encoding json config")
+	}
+	configBlobsum, err := configBlob.Commit()
+	if err != nil {
 		return err
 		return err
 	}
 	}
 
 
-	if err := pm.pluginStore.Add(p); err != nil {
+	p, err := pm.createPlugin(name, configBlobsum, []digest.Digest{rootFSBlobsum}, tmpRootFSDir, nil)
+	if err != nil {
 		return err
 		return err
 	}
 	}
 
 
-	pm.pluginEventLogger(p.GetID(), repoName, "create")
+	pm.config.LogPluginEvent(p.PluginObj.ID, name, "create")
 
 
 	return nil
 	return nil
 }
 }
 
 
-func getPluginName(name string) (string, error) {
-	named, err := reference.ParseNamed(name) // FIXME: validate
-	if err != nil {
-		return "", err
-	}
-	if reference.IsNameOnly(named) {
-		named = reference.WithDefaultTag(named)
-	}
-	ref, ok := named.(reference.NamedTagged)
-	if !ok {
-		return "", fmt.Errorf("invalid name: %s", named.String())
-	}
-	return ref.String(), nil
+func (pm *Manager) validateConfig(config types.PluginConfig) error {
+	return nil // TODO:
+}
+
+func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser {
+	pr, pw := io.Pipe()
+	go func() {
+		tarReader := tar.NewReader(in)
+		tarWriter := tar.NewWriter(pw)
+		defer in.Close()
+
+		hasRootFS := false
+
+		for {
+			hdr, err := tarReader.Next()
+			if err == io.EOF {
+				if !hasRootFS {
+					pw.CloseWithError(errors.Wrap(err, "no rootfs found"))
+					return
+				}
+				// Signals end of archive.
+				tarWriter.Close()
+				pw.Close()
+				return
+			}
+			if err != nil {
+				pw.CloseWithError(errors.Wrap(err, "failed to read from tar"))
+				return
+			}
+
+			content := io.Reader(tarReader)
+			name := path.Clean(hdr.Name)
+			if path.IsAbs(name) {
+				name = name[1:]
+			}
+			if name == configFileName {
+				dt, err := ioutil.ReadAll(content)
+				if err != nil {
+					pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName))
+					return
+				}
+				*config = dt
+			}
+			if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName {
+				hdr.Name = path.Clean(path.Join(parts[1:]...))
+				if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") {
+					hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:]
+				}
+				if err := tarWriter.WriteHeader(hdr); err != nil {
+					pw.CloseWithError(errors.Wrap(err, "error writing tar header"))
+					return
+				}
+				if _, err := pools.Copy(tarWriter, content); err != nil {
+					pw.CloseWithError(errors.Wrap(err, "error copying tar data"))
+					return
+				}
+				hasRootFS = true
+			} else {
+				io.Copy(ioutil.Discard, content)
+			}
+		}
+	}()
+	return pr
 }
 }
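For orientation, the rewritten CreateFromContext in plugin/backend_linux.go expects the build-context tar to carry a config.json at the archive root plus the plugin filesystem under rootfs/; splitConfigRootFSFromTar then peels the config out and re-roots the rootfs entries into their own stream, which is gzipped into the blob store while its uncompressed digest becomes the layer diff ID. The sketch below is not part of the diff; the helper name and file contents are illustrative, and it only shows how such a context could be assembled with the standard library.

// Illustrative sketch only: produce a build context in the layout that
// CreateFromContext expects (config.json at the root, files under rootfs/).
// Error handling is minimal for brevity.
package main

import (
	"archive/tar"
	"bytes"
	"io"
)

func buildPluginContext(configJSON, pluginBinary []byte) io.Reader {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)

	// config.json is read in full by splitConfigRootFSFromTar and parsed as
	// types.PluginConfig by CreateFromContext.
	tw.WriteHeader(&tar.Header{Name: "config.json", Mode: 0600, Size: int64(len(configJSON))})
	tw.Write(configJSON)

	// Everything under rootfs/ is re-rooted and becomes the plugin's single
	// layer; the path below is made up for the example.
	tw.WriteHeader(&tar.Header{Name: "rootfs/usr/bin/sample-plugin", Mode: 0755, Size: int64(len(pluginBinary))})
	tw.Write(pluginBinary)

	tw.Close()
	return buf
}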

+ 8 - 11
plugin/backend_unsupported.go

@@ -4,18 +4,18 @@ package plugin
 
 
 import (
 import (
 	"errors"
 	"errors"
-	"fmt"
 	"io"
 	"io"
 	"net/http"
 	"net/http"
 
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/reference"
 	"golang.org/x/net/context"
 	"golang.org/x/net/context"
 )
 )
 
 
 var errNotSupported = errors.New("plugins are not supported on this platform")
 var errNotSupported = errors.New("plugins are not supported on this platform")
 
 
 // Disable deactivates a plugin, which implies that they cannot be used by containers.
 // Disable deactivates a plugin, which implies that they cannot be used by containers.
-func (pm *Manager) Disable(name string) error {
+func (pm *Manager) Disable(name string, config *types.PluginDisableConfig) error {
 	return errNotSupported
 	return errNotSupported
 }
 }
 
 
@@ -25,20 +25,17 @@ func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error {
 }
 }
 
 
 // Inspect examines a plugin config
 // Inspect examines a plugin config
-func (pm *Manager) Inspect(refOrID string) (tp types.Plugin, err error) {
-	// Even though plugin is not supported, we still want to return `not found`
-	// error so that `docker inspect` (without `--type` specified) returns correct
-	// `not found` message
-	return tp, fmt.Errorf("no such plugin name or ID associated with %q", refOrID)
+func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) {
+	return nil, errNotSupported
 }
 }
 
 
 // Privileges pulls a plugin config and computes the privileges required to install it.
 // Privileges pulls a plugin config and computes the privileges required to install it.
-func (pm *Manager) Privileges(name string, metaHeaders http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
+func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
 	return nil, errNotSupported
 	return nil, errNotSupported
 }
 }
 
 
 // Pull pulls a plugin, check if the correct privileges are provided and install the plugin.
 // Pull pulls a plugin, check if the correct privileges are provided and install the plugin.
-func (pm *Manager) Pull(name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges) error {
+func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, out io.Writer) error {
 	return errNotSupported
 	return errNotSupported
 }
 }
 
 
@@ -48,7 +45,7 @@ func (pm *Manager) List() ([]types.Plugin, error) {
 }
 }
 
 
 // Push pushes a plugin to the store.
 // Push pushes a plugin to the store.
-func (pm *Manager) Push(name string, metaHeader http.Header, authConfig *types.AuthConfig) error {
+func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, out io.Writer) error {
 	return errNotSupported
 	return errNotSupported
 }
 }
 
 
@@ -64,6 +61,6 @@ func (pm *Manager) Set(name string, args []string) error {
 
 
 // CreateFromContext creates a plugin from the given pluginDir which contains
 // CreateFromContext creates a plugin from the given pluginDir which contains
 // both the rootfs and the config.json and a repoName with optional tag.
 // both the rootfs and the config.json and a repoName with optional tag.
-func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.Reader, options *types.PluginCreateOptions) error {
+func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) error {
 	return errNotSupported
 	return errNotSupported
 }
 }
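The stubs above keep the exported Manager surface compiling on platforms without plugin support and make every call fail uniformly with errNotSupported. The hunk starts at line 4 of the file, so its build constraint is not visible here; the sketch below shows the general pattern with a hypothetical package and an assumed build tag.

// Illustrative pattern, not copied from the tree: a platform stub that keeps
// the same signature as the real implementation. The build tag is an
// assumption for the example.

// +build !linux

package sample

import "errors"

var errNotSupported = errors.New("sample is not supported on this platform")

// Enable mirrors the real implementation's signature so callers compile on
// every platform and hit one well-known error at runtime.
func Enable(name string) error {
	return errNotSupported
}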

+ 181 - 0
plugin/blobstore.go

@@ -0,0 +1,181 @@
+package plugin
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/distribution/xfer"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/progress"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+type blobstore interface {
+	New() (WriteCommitCloser, error)
+	Get(dgst digest.Digest) (io.ReadCloser, error)
+	Size(dgst digest.Digest) (int64, error)
+}
+
+type basicBlobStore struct {
+	path string
+}
+
+func newBasicBlobStore(p string) (*basicBlobStore, error) {
+	tmpdir := filepath.Join(p, "tmp")
+	if err := os.MkdirAll(tmpdir, 0700); err != nil {
+		return nil, errors.Wrapf(err, "failed to mkdir %v", p)
+	}
+	return &basicBlobStore{path: p}, nil
+}
+
+func (b *basicBlobStore) New() (WriteCommitCloser, error) {
+	f, err := ioutil.TempFile(filepath.Join(b.path, "tmp"), ".insertion")
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create temp file")
+	}
+	return newInsertion(f), nil
+}
+
+func (b *basicBlobStore) Get(dgst digest.Digest) (io.ReadCloser, error) {
+	return os.Open(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex()))
+}
+
+func (b *basicBlobStore) Size(dgst digest.Digest) (int64, error) {
+	stat, err := os.Stat(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex()))
+	if err != nil {
+		return 0, err
+	}
+	return stat.Size(), nil
+}
+
+func (b *basicBlobStore) gc(whitelist map[digest.Digest]struct{}) {
+	for _, alg := range []string{string(digest.Canonical)} {
+		items, err := ioutil.ReadDir(filepath.Join(b.path, alg))
+		if err != nil {
+			continue
+		}
+		for _, fi := range items {
+			if _, exists := whitelist[digest.Digest(alg+":"+fi.Name())]; !exists {
+				p := filepath.Join(b.path, alg, fi.Name())
+				err := os.RemoveAll(p)
+				logrus.Debugf("cleaned up blob %v: %v", p, err)
+			}
+		}
+	}
+
+}
+
+// WriteCommitCloser defines object that can be committed to blobstore.
+type WriteCommitCloser interface {
+	io.WriteCloser
+	Commit() (digest.Digest, error)
+}
+
+type insertion struct {
+	io.Writer
+	f        *os.File
+	digester digest.Digester
+	closed   bool
+}
+
+func newInsertion(tempFile *os.File) *insertion {
+	digester := digest.Canonical.New()
+	return &insertion{f: tempFile, digester: digester, Writer: io.MultiWriter(tempFile, digester.Hash())}
+}
+
+func (i *insertion) Commit() (digest.Digest, error) {
+	p := i.f.Name()
+	d := filepath.Join(filepath.Join(p, "../../"))
+	i.f.Sync()
+	defer os.RemoveAll(p)
+	if err := i.f.Close(); err != nil {
+		return "", err
+	}
+	i.closed = true
+	dgst := i.digester.Digest()
+	if err := os.MkdirAll(filepath.Join(d, string(dgst.Algorithm())), 0700); err != nil {
+		return "", errors.Wrapf(err, "failed to mkdir %v", d)
+	}
+	if err := os.Rename(p, filepath.Join(d, string(dgst.Algorithm()), dgst.Hex())); err != nil {
+		return "", errors.Wrapf(err, "failed to rename %v", p)
+	}
+	return dgst, nil
+}
+
+func (i *insertion) Close() error {
+	if i.closed {
+		return nil
+	}
+	defer os.RemoveAll(i.f.Name())
+	return i.f.Close()
+}
+
+type downloadManager struct {
+	blobStore    blobstore
+	tmpDir       string
+	blobs        []digest.Digest
+	configDigest digest.Digest
+}
+
+func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
+	for _, l := range layers {
+		b, err := dm.blobStore.New()
+		if err != nil {
+			return initialRootFS, nil, err
+		}
+		defer b.Close()
+		rc, _, err := l.Download(ctx, progressOutput)
+		if err != nil {
+			return initialRootFS, nil, errors.Wrap(err, "failed to download")
+		}
+		defer rc.Close()
+		r := io.TeeReader(rc, b)
+		inflatedLayerData, err := archive.DecompressStream(r)
+		if err != nil {
+			return initialRootFS, nil, err
+		}
+		digester := digest.Canonical.New()
+		if _, err := archive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil {
+			return initialRootFS, nil, err
+		}
+		initialRootFS.Append(layer.DiffID(digester.Digest()))
+		d, err := b.Commit()
+		if err != nil {
+			return initialRootFS, nil, err
+		}
+		dm.blobs = append(dm.blobs, d)
+	}
+	return initialRootFS, nil, nil
+}
+
+func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) {
+	b, err := dm.blobStore.New()
+	if err != nil {
+		return "", err
+	}
+	defer b.Close()
+	n, err := b.Write(dt)
+	if err != nil {
+		return "", err
+	}
+	if n != len(dt) {
+		return "", io.ErrShortWrite
+	}
+	d, err := b.Commit()
+	dm.configDigest = d
+	return d, err
+}
+
+func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) {
+	return nil, digest.ErrDigestNotFound
+}
+func (dm *downloadManager) RootFSFromConfig(c []byte) (*image.RootFS, error) {
+	return configToRootFS(c)
+}
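plugin/blobstore.go introduces a small content-addressed store: New returns a WriteCommitCloser that tees writes into a digester, and Commit renames the temp file to <path>/sha256/<hex> and returns the digest, which Get and Size then use as the lookup key. A usage sketch, written as if it sat next to this code in the plugin package; the helper name and path are illustrative.

package plugin

import (
	"fmt"
	"io/ioutil"
)

// blobRoundTrip stores a payload and reads it back by digest. It relies on
// the unexported basicBlobStore API defined above.
func blobRoundTrip(root string, payload []byte) error {
	bs, err := newBasicBlobStore(root)
	if err != nil {
		return err
	}

	w, err := bs.New()
	if err != nil {
		return err
	}
	defer w.Close() // no-op after a successful Commit
	if _, err := w.Write(payload); err != nil {
		return err
	}
	dgst, err := w.Commit() // moves the temp file to <root>/sha256/<hex>
	if err != nil {
		return err
	}

	rc, err := bs.Get(dgst)
	if err != nil {
		return err
	}
	defer rc.Close()
	dt, err := ioutil.ReadAll(rc)
	if err != nil {
		return err
	}
	fmt.Printf("stored %s (%d bytes)\n", dgst, len(dt))
	return nil
}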

+ 1 - 6
plugin/store/defs.go → plugin/defs.go

@@ -1,7 +1,6 @@
-package store
+package plugin
 
 
 import (
 import (
-	"path/filepath"
 	"sync"
 	"sync"
 
 
 	"github.com/docker/docker/pkg/plugins"
 	"github.com/docker/docker/pkg/plugins"
@@ -16,8 +15,6 @@ type Store struct {
 	 * to the new model. Legacy plugins use Handle() for registering an
 	 * to the new model. Legacy plugins use Handle() for registering an
 	 * activation callback.*/
 	 * activation callback.*/
 	handlers map[string][]func(string, *plugins.Client)
 	handlers map[string][]func(string, *plugins.Client)
-	nameToID map[string]string
-	plugindb string
 }
 }
 
 
 // NewStore creates a Store.
 // NewStore creates a Store.
@@ -25,7 +22,5 @@ func NewStore(libRoot string) *Store {
 	return &Store{
 	return &Store{
 		plugins:  make(map[string]*v2.Plugin),
 		plugins:  make(map[string]*v2.Plugin),
 		handlers: make(map[string][]func(string, *plugins.Client)),
 		handlers: make(map[string][]func(string, *plugins.Client)),
-		nameToID: make(map[string]string),
-		plugindb: filepath.Join(libRoot, "plugins", "plugins.json"),
 	}
 	}
 }
 }

+ 0 - 222
plugin/distribution/pull.go

@@ -1,222 +0,0 @@
-package distribution
-
-import (
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"os"
-	"path/filepath"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/manifest/schema2"
-	"github.com/docker/docker/api/types"
-	dockerdist "github.com/docker/docker/distribution"
-	archive "github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/reference"
-	"github.com/docker/docker/registry"
-	"golang.org/x/net/context"
-)
-
-// PullData is the plugin config and the rootfs
-type PullData interface {
-	Config() ([]byte, error)
-	Layer() (io.ReadCloser, error)
-}
-
-type pullData struct {
-	repository distribution.Repository
-	manifest   schema2.Manifest
-	index      int
-}
-
-func (pd *pullData) Config() ([]byte, error) {
-	blobs := pd.repository.Blobs(context.Background())
-	config, err := blobs.Get(context.Background(), pd.manifest.Config.Digest)
-	if err != nil {
-		return nil, err
-	}
-	// validate
-	var p types.Plugin
-	if err := json.Unmarshal(config, &p); err != nil {
-		return nil, err
-	}
-	return config, nil
-}
-
-func (pd *pullData) Layer() (io.ReadCloser, error) {
-	if pd.index >= len(pd.manifest.Layers) {
-		return nil, io.EOF
-	}
-
-	blobs := pd.repository.Blobs(context.Background())
-	rsc, err := blobs.Open(context.Background(), pd.manifest.Layers[pd.index].Digest)
-	if err != nil {
-		return nil, err
-	}
-	pd.index++
-	return rsc, nil
-}
-
-// GetRef returns the distribution reference for a given name.
-func GetRef(name string) (reference.Named, error) {
-	ref, err := reference.ParseNamed(name)
-	if err != nil {
-		return nil, err
-	}
-	return ref, nil
-}
-
-// GetTag returns the tag associated with the given reference name.
-func GetTag(ref reference.Named) string {
-	tag := DefaultTag
-	if ref, ok := ref.(reference.NamedTagged); ok {
-		tag = ref.Tag()
-	}
-	return tag
-}
-
-// Pull downloads the plugin from Store
-func Pull(ref reference.Named, rs registry.Service, metaheader http.Header, authConfig *types.AuthConfig) (PullData, error) {
-	repoInfo, err := rs.ResolveRepository(ref)
-	if err != nil {
-		logrus.Debugf("pull.go: error in ResolveRepository: %v", err)
-		return nil, err
-	}
-	repoInfo.Class = "plugin"
-
-	if err := dockerdist.ValidateRepoName(repoInfo.Name()); err != nil {
-		logrus.Debugf("pull.go: error in ValidateRepoName: %v", err)
-		return nil, err
-	}
-
-	endpoints, err := rs.LookupPullEndpoints(repoInfo.Hostname())
-	if err != nil {
-		logrus.Debugf("pull.go: error in LookupPullEndpoints: %v", err)
-		return nil, err
-	}
-
-	var confirmedV2 bool
-	var repository distribution.Repository
-
-	for _, endpoint := range endpoints {
-		if confirmedV2 && endpoint.Version == registry.APIVersion1 {
-			logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
-			continue
-		}
-
-		// TODO: reuse contexts
-		repository, confirmedV2, err = dockerdist.NewV2Repository(context.Background(), repoInfo, endpoint, metaheader, authConfig, "pull")
-		if err != nil {
-			logrus.Debugf("pull.go: error in NewV2Repository: %v", err)
-			return nil, err
-		}
-		if !confirmedV2 {
-			logrus.Debug("pull.go: !confirmedV2")
-			return nil, ErrUnsupportedRegistry
-		}
-		logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version)
-		break
-	}
-
-	tag := DefaultTag
-	if ref, ok := ref.(reference.NamedTagged); ok {
-		tag = ref.Tag()
-	}
-
-	// tags := repository.Tags(context.Background())
-	// desc, err := tags.Get(context.Background(), tag)
-	// 	if err != nil {
-	// 		return nil, err
-	// 	}
-	//
-	msv, err := repository.Manifests(context.Background())
-	if err != nil {
-		logrus.Debugf("pull.go: error in repository.Manifests: %v", err)
-		return nil, err
-	}
-	manifest, err := msv.Get(context.Background(), "", distribution.WithTag(tag))
-	if err != nil {
-		logrus.Debugf("pull.go: error in msv.Get(): %v", err)
-		return nil, dockerdist.TranslatePullError(err, repoInfo)
-	}
-
-	_, pl, err := manifest.Payload()
-	if err != nil {
-		logrus.Debugf("pull.go: error in manifest.Payload(): %v", err)
-		return nil, err
-	}
-	var m schema2.Manifest
-	if err := json.Unmarshal(pl, &m); err != nil {
-		logrus.Debugf("pull.go: error in json.Unmarshal(): %v", err)
-		return nil, err
-	}
-	if m.Config.MediaType != schema2.MediaTypePluginConfig {
-		return nil, ErrUnsupportedMediaType
-	}
-
-	pd := &pullData{
-		repository: repository,
-		manifest:   m,
-	}
-
-	logrus.Debugf("manifest: %s", pl)
-	return pd, nil
-}
-
-// WritePullData extracts manifest and rootfs to the disk.
-func WritePullData(pd PullData, dest string, extract bool) error {
-	config, err := pd.Config()
-	if err != nil {
-		return err
-	}
-	var p types.Plugin
-	if err := json.Unmarshal(config, &p); err != nil {
-		return err
-	}
-	logrus.Debugf("plugin: %#v", p)
-
-	if err := os.MkdirAll(dest, 0700); err != nil {
-		return err
-	}
-
-	if extract {
-		if err := ioutil.WriteFile(filepath.Join(dest, "config.json"), config, 0600); err != nil {
-			return err
-		}
-
-		if err := os.MkdirAll(filepath.Join(dest, "rootfs"), 0700); err != nil {
-			return err
-		}
-	}
-
-	for i := 0; ; i++ {
-		l, err := pd.Layer()
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			return err
-		}
-
-		if !extract {
-			f, err := os.Create(filepath.Join(dest, fmt.Sprintf("layer%d.tar", i)))
-			if err != nil {
-				l.Close()
-				return err
-			}
-			io.Copy(f, l)
-			l.Close()
-			f.Close()
-			continue
-		}
-
-		if _, err := archive.ApplyLayer(filepath.Join(dest, "rootfs"), l); err != nil {
-			return err
-		}
-
-	}
-	return nil
-}

+ 0 - 134
plugin/distribution/push.go

@@ -1,134 +0,0 @@
-package distribution
-
-import (
-	"crypto/sha256"
-	"io"
-	"net/http"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/manifest/schema2"
-	"github.com/docker/docker/api/types"
-	dockerdist "github.com/docker/docker/distribution"
-	"github.com/docker/docker/reference"
-	"github.com/docker/docker/registry"
-	"golang.org/x/net/context"
-)
-
-// Push pushes a plugin to a registry.
-func Push(name string, rs registry.Service, metaHeader http.Header, authConfig *types.AuthConfig, config io.ReadCloser, layers io.ReadCloser) (digest.Digest, error) {
-	ref, err := reference.ParseNamed(name)
-	if err != nil {
-		return "", err
-	}
-
-	repoInfo, err := rs.ResolveRepository(ref)
-	if err != nil {
-		return "", err
-	}
-	repoInfo.Class = "plugin"
-
-	if err := dockerdist.ValidateRepoName(repoInfo.Name()); err != nil {
-		return "", err
-	}
-
-	endpoints, err := rs.LookupPushEndpoints(repoInfo.Hostname())
-	if err != nil {
-		return "", err
-	}
-
-	var confirmedV2 bool
-	var repository distribution.Repository
-	for _, endpoint := range endpoints {
-		if confirmedV2 && endpoint.Version == registry.APIVersion1 {
-			logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
-			continue
-		}
-		repository, confirmedV2, err = dockerdist.NewV2Repository(context.Background(), repoInfo, endpoint, metaHeader, authConfig, "push", "pull")
-		if err != nil {
-			return "", err
-		}
-		if !confirmedV2 {
-			return "", ErrUnsupportedRegistry
-		}
-		logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version)
-		// This means that we found an endpoint. and we are ready to push
-		break
-	}
-
-	// Returns a reference to the repository's blob service.
-	blobs := repository.Blobs(context.Background())
-
-	// Descriptor = {mediaType, size, digest}
-	var descs []distribution.Descriptor
-
-	for i, f := range []io.ReadCloser{config, layers} {
-		bw, err := blobs.Create(context.Background())
-		if err != nil {
-			logrus.Debugf("Error in blobs.Create: %v", err)
-			return "", err
-		}
-		h := sha256.New()
-		r := io.TeeReader(f, h)
-		_, err = io.Copy(bw, r)
-		if err != nil {
-			f.Close()
-			logrus.Debugf("Error in io.Copy: %v", err)
-			return "", err
-		}
-		f.Close()
-		mt := schema2.MediaTypeLayer
-		if i == 0 {
-			mt = schema2.MediaTypePluginConfig
-		}
-		// Commit completes the write process to the BlobService.
-		// The descriptor arg to Commit is called the "provisional" descriptor and
-		// used for validation.
-		// The returned descriptor should be the one used. Its called the "Canonical"
-		// descriptor.
-		desc, err := bw.Commit(context.Background(), distribution.Descriptor{
-			MediaType: mt,
-			// XXX: What about the Size?
-			Digest: digest.NewDigest("sha256", h),
-		})
-		if err != nil {
-			logrus.Debugf("Error in bw.Commit: %v", err)
-			return "", err
-		}
-		// The canonical descriptor is set the mediatype again, just in case.
-		// Don't touch the digest or the size here.
-		desc.MediaType = mt
-		logrus.Debugf("pushed blob: %s %s", desc.MediaType, desc.Digest)
-		descs = append(descs, desc)
-	}
-
-	// XXX: schema2.Versioned needs a MediaType as well.
-	// "application/vnd.docker.distribution.manifest.v2+json"
-	m, err := schema2.FromStruct(schema2.Manifest{Versioned: schema2.SchemaVersion, Config: descs[0], Layers: descs[1:]})
-	if err != nil {
-		logrus.Debugf("error in schema2.FromStruct: %v", err)
-		return "", err
-	}
-
-	msv, err := repository.Manifests(context.Background())
-	if err != nil {
-		logrus.Debugf("error in repository.Manifests: %v", err)
-		return "", err
-	}
-
-	_, pl, err := m.Payload()
-	if err != nil {
-		logrus.Debugf("error in m.Payload: %v", err)
-		return "", err
-	}
-
-	logrus.Debugf("Pushed manifest: %s", pl)
-
-	tag := DefaultTag
-	if tagged, ok := ref.(reference.NamedTagged); ok {
-		tag = tagged.Tag()
-	}
-
-	return msv.Put(context.Background(), m, distribution.WithTag(tag))
-}

+ 0 - 12
plugin/distribution/types.go

@@ -1,12 +0,0 @@
-package distribution
-
-import "errors"
-
-// ErrUnsupportedRegistry indicates that the registry does not support v2 protocol
-var ErrUnsupportedRegistry = errors.New("only V2 repositories are supported for plugin distribution")
-
-// ErrUnsupportedMediaType indicates we are pulling content that's not a plugin
-var ErrUnsupportedMediaType = errors.New("content is not a plugin")
-
-// DefaultTag is the default tag for plugins
-const DefaultTag = "latest"

+ 166 - 58
plugin/manager.go

@@ -3,25 +3,34 @@ package plugin
 import (
 import (
 	"encoding/json"
 	"encoding/json"
 	"io"
 	"io"
+	"io/ioutil"
 	"os"
 	"os"
 	"path/filepath"
 	"path/filepath"
+	"reflect"
+	"regexp"
 	"strings"
 	"strings"
 	"sync"
 	"sync"
 
 
 	"github.com/Sirupsen/logrus"
 	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
 	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/libcontainerd"
+	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/mount"
-	"github.com/docker/docker/plugin/store"
 	"github.com/docker/docker/plugin/v2"
 	"github.com/docker/docker/plugin/v2"
+	"github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/registry"
+	"github.com/pkg/errors"
 )
 )
 
 
-var (
-	manager *Manager
-)
+const configFileName = "config.json"
+const rootFSFileName = "rootfs"
+
+var validFullID = regexp.MustCompile(`^([a-f0-9]{64})$`)
 
 
 func (pm *Manager) restorePlugin(p *v2.Plugin) error {
 func (pm *Manager) restorePlugin(p *v2.Plugin) error {
-	p.Restore(pm.runRoot)
 	if p.IsEnabled() {
 	if p.IsEnabled() {
 		return pm.restore(p)
 		return pm.restore(p)
 	}
 	}
@@ -30,17 +39,25 @@ func (pm *Manager) restorePlugin(p *v2.Plugin) error {
 
 
 type eventLogger func(id, name, action string)
 type eventLogger func(id, name, action string)
 
 
+// ManagerConfig defines the configuration needed to start a new manager.
+type ManagerConfig struct {
+	Store              *Store // remove
+	Executor           libcontainerd.Remote
+	RegistryService    registry.Service
+	LiveRestoreEnabled bool // TODO: remove
+	LogPluginEvent     eventLogger
+	Root               string
+	ExecRoot           string
+}
+
 // Manager controls the plugin subsystem.
 // Manager controls the plugin subsystem.
 type Manager struct {
 type Manager struct {
-	libRoot           string
-	runRoot           string
-	pluginStore       *store.Store
-	containerdClient  libcontainerd.Client
-	registryService   registry.Service
-	liveRestore       bool
-	pluginEventLogger eventLogger
-	mu                sync.RWMutex // protects cMap
-	cMap              map[*v2.Plugin]*controller
+	config           ManagerConfig
+	mu               sync.RWMutex // protects cMap
+	muGC             sync.RWMutex // protects blobstore deletions
+	cMap             map[*v2.Plugin]*controller
+	containerdClient libcontainerd.Client
+	blobStore        *basicBlobStore
 }
 }
 
 
 // controller represents the manager's control on a plugin.
 // controller represents the manager's control on a plugin.
@@ -50,39 +67,56 @@ type controller struct {
 	timeoutInSecs int
 	timeoutInSecs int
 }
 }
 
 
-// GetManager returns the singleton plugin Manager
-func GetManager() *Manager {
-	return manager
+// pluginRegistryService ensures that all resolved repositories
+// are of the plugin class.
+type pluginRegistryService struct {
+	registry.Service
 }
 }
 
 
-// Init (was NewManager) instantiates the singleton Manager.
-// TODO: revert this to NewManager once we get rid of all the singletons.
-func Init(root string, ps *store.Store, remote libcontainerd.Remote, rs registry.Service, liveRestore bool, evL eventLogger) (err error) {
-	if manager != nil {
-		return nil
+func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) {
+	repoInfo, err = s.Service.ResolveRepository(name)
+	if repoInfo != nil {
+		repoInfo.Class = "plugin"
 	}
 	}
+	return
+}
 
 
-	root = filepath.Join(root, "plugins")
-	manager = &Manager{
-		libRoot:           root,
-		runRoot:           "/run/docker/plugins",
-		pluginStore:       ps,
-		registryService:   rs,
-		liveRestore:       liveRestore,
-		pluginEventLogger: evL,
+// NewManager returns a new plugin manager.
+func NewManager(config ManagerConfig) (*Manager, error) {
+	if config.RegistryService != nil {
+		config.RegistryService = pluginRegistryService{config.RegistryService}
 	}
 	}
-	if err := os.MkdirAll(manager.runRoot, 0700); err != nil {
-		return err
+	manager := &Manager{
+		config: config,
+	}
+	if err := os.MkdirAll(manager.config.Root, 0700); err != nil {
+		return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.Root)
 	}
 	}
-	manager.containerdClient, err = remote.Client(manager)
+	if err := os.MkdirAll(manager.config.ExecRoot, 0700); err != nil {
+		return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.ExecRoot)
+	}
+	if err := os.MkdirAll(manager.tmpDir(), 0700); err != nil {
+		return nil, errors.Wrapf(err, "failed to mkdir %v", manager.tmpDir())
+	}
+	var err error
+	manager.containerdClient, err = config.Executor.Client(manager) // todo: move to another struct
 	if err != nil {
 	if err != nil {
-		return err
+		return nil, errors.Wrap(err, "failed to create containerd client")
 	}
 	}
+	manager.blobStore, err = newBasicBlobStore(filepath.Join(manager.config.Root, "storage/blobs"))
+	if err != nil {
+		return nil, err
+	}
+
 	manager.cMap = make(map[*v2.Plugin]*controller)
 	manager.cMap = make(map[*v2.Plugin]*controller)
 	if err := manager.reload(); err != nil {
 	if err := manager.reload(); err != nil {
-		return err
+		return nil, errors.Wrap(err, "failed to restore plugins")
 	}
 	}
-	return nil
+	return manager, nil
+}
+
+func (pm *Manager) tmpDir() string {
+	return filepath.Join(pm.config.Root, "tmp")
 }
 }
 
 
 // StateChanged updates plugin internals using libcontainerd events.
 // StateChanged updates plugin internals using libcontainerd events.
@@ -91,7 +125,7 @@ func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error {
 
 
 	switch e.State {
 	switch e.State {
 	case libcontainerd.StateExit:
 	case libcontainerd.StateExit:
-		p, err := pm.pluginStore.GetByID(id)
+		p, err := pm.config.Store.GetV2Plugin(id)
 		if err != nil {
 		if err != nil {
 			return err
 			return err
 		}
 		}
@@ -105,7 +139,7 @@ func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error {
 		restart := c.restart
 		restart := c.restart
 		pm.mu.RUnlock()
 		pm.mu.RUnlock()
 
 
-		p.RemoveFromDisk()
+		os.RemoveAll(filepath.Join(pm.config.ExecRoot, id))
 
 
 		if p.PropagatedMount != "" {
 		if p.PropagatedMount != "" {
 			if err := mount.Unmount(p.PropagatedMount); err != nil {
 			if err := mount.Unmount(p.PropagatedMount); err != nil {
@@ -121,37 +155,38 @@ func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error {
 	return nil
 	return nil
 }
 }
 
 
-// reload is used on daemon restarts to load the manager's state
-func (pm *Manager) reload() error {
-	dt, err := os.Open(filepath.Join(pm.libRoot, "plugins.json"))
+func (pm *Manager) reload() error { // todo: restore
+	dir, err := ioutil.ReadDir(pm.config.Root)
 	if err != nil {
 	if err != nil {
-		if os.IsNotExist(err) {
-			return nil
-		}
-		return err
+		return errors.Wrapf(err, "failed to read %v", pm.config.Root)
 	}
 	}
-	defer dt.Close()
-
 	plugins := make(map[string]*v2.Plugin)
 	plugins := make(map[string]*v2.Plugin)
-	if err := json.NewDecoder(dt).Decode(&plugins); err != nil {
-		return err
+	for _, v := range dir {
+		if validFullID.MatchString(v.Name()) {
+			p, err := pm.loadPlugin(v.Name())
+			if err != nil {
+				return err
+			}
+			plugins[p.GetID()] = p
+		}
 	}
 	}
-	pm.pluginStore.SetAll(plugins)
 
 
-	var group sync.WaitGroup
-	group.Add(len(plugins))
+	pm.config.Store.SetAll(plugins)
+
+	var wg sync.WaitGroup
+	wg.Add(len(plugins))
 	for _, p := range plugins {
 	for _, p := range plugins {
-		c := &controller{}
+		c := &controller{} // todo: remove this
 		pm.cMap[p] = c
 		pm.cMap[p] = c
 		go func(p *v2.Plugin) {
 		go func(p *v2.Plugin) {
-			defer group.Done()
+			defer wg.Done()
 			if err := pm.restorePlugin(p); err != nil {
 			if err := pm.restorePlugin(p); err != nil {
 				logrus.Errorf("failed to restore plugin '%s': %s", p.Name(), err)
 				logrus.Errorf("failed to restore plugin '%s': %s", p.Name(), err)
 				return
 				return
 			}
 			}
 
 
 			if p.Rootfs != "" {
 			if p.Rootfs != "" {
-				p.Rootfs = filepath.Join(pm.libRoot, p.PluginObj.ID, "rootfs")
+				p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs")
 			}
 			}
 
 
 			// We should only enable rootfs propagation for certain plugin types that need it.
 			// We should only enable rootfs propagation for certain plugin types that need it.
@@ -168,8 +203,8 @@ func (pm *Manager) reload() error {
 				}
 				}
 			}
 			}
 
 
-			pm.pluginStore.Update(p)
-			requiresManualRestore := !pm.liveRestore && p.IsEnabled()
+			pm.save(p)
+			requiresManualRestore := !pm.config.LiveRestoreEnabled && p.IsEnabled()
 
 
 			if requiresManualRestore {
 			if requiresManualRestore {
 				// if liveRestore is not enabled, the plugin will be stopped now so we should enable it
 				// if liveRestore is not enabled, the plugin will be stopped now so we should enable it
@@ -179,10 +214,50 @@ func (pm *Manager) reload() error {
 			}
 			}
 		}(p)
 		}(p)
 	}
 	}
-	group.Wait()
+	wg.Wait()
 	return nil
 	return nil
 }
 }
 
 
+func (pm *Manager) loadPlugin(id string) (*v2.Plugin, error) {
+	p := filepath.Join(pm.config.Root, id, configFileName)
+	dt, err := ioutil.ReadFile(p)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error reading %v", p)
+	}
+	var plugin v2.Plugin
+	if err := json.Unmarshal(dt, &plugin); err != nil {
+		return nil, errors.Wrapf(err, "error decoding %v", p)
+	}
+	return &plugin, nil
+}
+
+func (pm *Manager) save(p *v2.Plugin) error {
+	pluginJSON, err := json.Marshal(p)
+	if err != nil {
+		return errors.Wrap(err, "failed to marshal plugin json")
+	}
+	if err := ioutils.AtomicWriteFile(filepath.Join(pm.config.Root, p.GetID(), configFileName), pluginJSON, 0600); err != nil {
+		return err
+	}
+	return nil
+}
+
+// GC cleans up unreferenced blobs. It is recommended to run this in a goroutine.
+func (pm *Manager) GC() {
+	pm.muGC.Lock()
+	defer pm.muGC.Unlock()
+
+	whitelist := make(map[digest.Digest]struct{})
+	for _, p := range pm.config.Store.GetAll() {
+		whitelist[p.Config] = struct{}{}
+		for _, b := range p.Blobsums {
+			whitelist[b] = struct{}{}
+		}
+	}
+
+	pm.blobStore.gc(whitelist)
+}
+
 type logHook struct{ id string }
 type logHook struct{ id string }
 
 
 func (logHook) Levels() []logrus.Level {
 func (logHook) Levels() []logrus.Level {
@@ -212,3 +287,36 @@ func attachToLog(id string) func(libcontainerd.IOPipe) error {
 		return nil
 		return nil
 	}
 	}
 }
 }
+
+func validatePrivileges(requiredPrivileges, privileges types.PluginPrivileges) error {
+	// todo: make a better function that doesn't check order
+	if !reflect.DeepEqual(privileges, requiredPrivileges) {
+		return errors.New("incorrect privileges")
+	}
+	return nil
+}
+
+func configToRootFS(c []byte) (*image.RootFS, error) {
+	var pluginConfig types.PluginConfig
+	if err := json.Unmarshal(c, &pluginConfig); err != nil {
+		return nil, err
+	}
+	// validation for empty rootfs is in distribution code
+	if pluginConfig.Rootfs == nil {
+		return nil, nil
+	}
+
+	return rootFSFromPlugin(pluginConfig.Rootfs), nil
+}
+
+func rootFSFromPlugin(pluginfs *types.PluginConfigRootfs) *image.RootFS {
+	rootFS := image.RootFS{
+		Type:    pluginfs.Type,
+		DiffIDs: make([]layer.DiffID, len(pluginfs.DiffIds)),
+	}
+	for i := range pluginfs.DiffIds {
+		rootFS.DiffIDs[i] = layer.DiffID(pluginfs.DiffIds[i])
+	}
+
+	return &rootFS
+}
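Two details in the rewritten plugin/manager.go are worth calling out: plugin metadata now lives as <Root>/<id>/config.json written through save and read back by loadPlugin, rather than one shared plugins.json, and GC prunes the blob store against a whitelist of every digest still referenced by a plugin, holding muGC for writing while creators and pullers hold it for reading so a freshly committed blob cannot be collected before its plugin is saved. The standalone toy below (illustrative names only) shows that whitelist-plus-RWMutex shape in isolation.

// Standalone sketch of the GC pattern above: blob writers hold the read
// lock across "commit blob, record reference"; the collector holds the
// write lock while pruning everything outside the whitelist.
package main

import (
	"fmt"
	"sync"
)

type blobGC struct {
	mu         sync.RWMutex
	referenced map[string]struct{} // stand-in for digests reachable from saved plugins
	blobs      map[string][]byte   // stand-in for the on-disk blob store
}

func (g *blobGC) add(digest string, data []byte) {
	g.mu.RLock() // mirrors pm.muGC.RLock() in CreateFromContext
	defer g.mu.RUnlock()
	g.blobs[digest] = data
	g.referenced[digest] = struct{}{}
}

func (g *blobGC) gc() {
	g.mu.Lock() // mirrors pm.muGC.Lock() in Manager.GC
	defer g.mu.Unlock()
	for d := range g.blobs {
		if _, ok := g.referenced[d]; !ok {
			delete(g.blobs, d)
		}
	}
}

func main() {
	g := &blobGC{
		referenced: map[string]struct{}{},
		blobs:      map[string][]byte{"sha256:orphan": nil}, // left over from a failed create
	}
	g.add("sha256:config", []byte("{}"))
	g.gc()
	fmt.Println(len(g.blobs)) // 1: only the referenced blob remains
}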

+ 89 - 12
plugin/manager_linux.go

@@ -3,26 +3,32 @@
 package plugin
 package plugin
 
 
 import (
 import (
+	"encoding/json"
 	"fmt"
 	"fmt"
+	"os"
 	"path/filepath"
 	"path/filepath"
 	"syscall"
 	"syscall"
 	"time"
 	"time"
 
 
 	"github.com/Sirupsen/logrus"
 	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/daemon/initlayer"
 	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/libcontainerd"
-	"github.com/docker/docker/oci"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/plugins"
 	"github.com/docker/docker/pkg/plugins"
+	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/plugin/v2"
 	"github.com/docker/docker/plugin/v2"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
 )
 )
 
 
 func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
 func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
-	p.Rootfs = filepath.Join(pm.libRoot, p.PluginObj.ID, "rootfs")
+	p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs")
 	if p.IsEnabled() && !force {
 	if p.IsEnabled() && !force {
 		return fmt.Errorf("plugin %s is already enabled", p.Name())
 		return fmt.Errorf("plugin %s is already enabled", p.Name())
 	}
 	}
-	spec, err := p.InitSpec(oci.DefaultSpec())
+	spec, err := p.InitSpec(pm.config.ExecRoot)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
@@ -40,6 +46,10 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
 		}
 		}
 	}
 	}
 
 
+	if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), 0, 0); err != nil {
+		return err
+	}
+
 	if err := pm.containerdClient.Create(p.GetID(), "", "", specs.Spec(*spec), attachToLog(p.GetID())); err != nil {
 	if err := pm.containerdClient.Create(p.GetID(), "", "", specs.Spec(*spec), attachToLog(p.GetID())); err != nil {
 		if p.PropagatedMount != "" {
 		if p.PropagatedMount != "" {
 			if err := mount.Unmount(p.PropagatedMount); err != nil {
 			if err := mount.Unmount(p.PropagatedMount); err != nil {
@@ -53,7 +63,7 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
 }
 }
 
 
 func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error {
 func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error {
-	client, err := plugins.NewClientWithTimeout("unix://"+filepath.Join(p.GetRuntimeSourcePath(), p.GetSocket()), nil, c.timeoutInSecs)
+	client, err := plugins.NewClientWithTimeout("unix://"+filepath.Join(pm.config.ExecRoot, p.GetID(), p.GetSocket()), nil, c.timeoutInSecs)
 	if err != nil {
 	if err != nil {
 		c.restart = false
 		c.restart = false
 		shutdownPlugin(p, c, pm.containerdClient)
 		shutdownPlugin(p, c, pm.containerdClient)
@@ -61,9 +71,10 @@ func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error {
 	}
 	}
 
 
 	p.SetPClient(client)
 	p.SetPClient(client)
-	pm.pluginStore.SetState(p, true)
-	pm.pluginStore.CallHandler(p)
-	return nil
+	pm.config.Store.SetState(p, true)
+	pm.config.Store.CallHandler(p)
+
+	return pm.save(p)
 }
 }
 
 
 func (pm *Manager) restore(p *v2.Plugin) error {
 func (pm *Manager) restore(p *v2.Plugin) error {
@@ -71,7 +82,7 @@ func (pm *Manager) restore(p *v2.Plugin) error {
 		return err
 		return err
 	}
 	}
 
 
-	if pm.liveRestore {
+	if pm.config.LiveRestoreEnabled {
 		c := &controller{}
 		c := &controller{}
 		if pids, _ := pm.containerdClient.GetPidsForContainer(p.GetID()); len(pids) == 0 {
 		if pids, _ := pm.containerdClient.GetPidsForContainer(p.GetID()); len(pids) == 0 {
 			// plugin is not running, so follow normal startup procedure
 			// plugin is not running, so follow normal startup procedure
@@ -115,19 +126,19 @@ func (pm *Manager) disable(p *v2.Plugin, c *controller) error {
 
 
 	c.restart = false
 	c.restart = false
 	shutdownPlugin(p, c, pm.containerdClient)
 	shutdownPlugin(p, c, pm.containerdClient)
-	pm.pluginStore.SetState(p, false)
-	return nil
+	pm.config.Store.SetState(p, false)
+	return pm.save(p)
 }
 }
 
 
 // Shutdown stops all plugins and called during daemon shutdown.
 // Shutdown stops all plugins and called during daemon shutdown.
 func (pm *Manager) Shutdown() {
 func (pm *Manager) Shutdown() {
-	plugins := pm.pluginStore.GetAll()
+	plugins := pm.config.Store.GetAll()
 	for _, p := range plugins {
 	for _, p := range plugins {
 		pm.mu.RLock()
 		pm.mu.RLock()
 		c := pm.cMap[p]
 		c := pm.cMap[p]
 		pm.mu.RUnlock()
 		pm.mu.RUnlock()
 
 
-		if pm.liveRestore && p.IsEnabled() {
+		if pm.config.LiveRestoreEnabled && p.IsEnabled() {
 			logrus.Debug("Plugin active when liveRestore is set, skipping shutdown")
 			logrus.Debug("Plugin active when liveRestore is set, skipping shutdown")
 			continue
 			continue
 		}
 		}
@@ -137,3 +148,69 @@ func (pm *Manager) Shutdown() {
 		}
 		}
 	}
 	}
 }
 }
+
+// createPlugin creates a new plugin. Take the lock before calling.
+func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsums []digest.Digest, rootFSDir string, privileges *types.PluginPrivileges) (p *v2.Plugin, err error) {
+	if err := pm.config.Store.validateName(name); err != nil { // todo: this check is wrong. remove store
+		return nil, err
+	}
+
+	configRC, err := pm.blobStore.Get(configDigest)
+	if err != nil {
+		return nil, err
+	}
+	defer configRC.Close()
+
+	var config types.PluginConfig
+	dec := json.NewDecoder(configRC)
+	if err := dec.Decode(&config); err != nil {
+		return nil, errors.Wrapf(err, "failed to parse config")
+	}
+	if dec.More() {
+		return nil, errors.New("invalid config json")
+	}
+
+	requiredPrivileges, err := computePrivileges(config)
+	if err != nil {
+		return nil, err
+	}
+	if privileges != nil {
+		if err := validatePrivileges(requiredPrivileges, *privileges); err != nil {
+			return nil, err
+		}
+	}
+
+	p = &v2.Plugin{
+		PluginObj: types.Plugin{
+			Name:   name,
+			ID:     stringid.GenerateRandomID(),
+			Config: config,
+		},
+		Config:   configDigest,
+		Blobsums: blobsums,
+	}
+	p.InitEmptySettings()
+
+	pdir := filepath.Join(pm.config.Root, p.PluginObj.ID)
+	if err := os.MkdirAll(pdir, 0700); err != nil {
+		return nil, errors.Wrapf(err, "failed to mkdir %v", pdir)
+	}
+
+	defer func() {
+		if err != nil {
+			os.RemoveAll(pdir)
+		}
+	}()
+
+	if err := os.Rename(rootFSDir, filepath.Join(pdir, rootFSFileName)); err != nil {
+		return nil, errors.Wrap(err, "failed to rename rootfs")
+	}
+
+	if err := pm.save(p); err != nil {
+		return nil, err
+	}
+
+	pm.config.Store.Add(p) // todo: remove
+
+	return p, nil
+}
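createPlugin above checks the privileges granted by the user against those computed from the config via validatePrivileges, which (per its own TODO in plugin/manager.go) still relies on reflect.DeepEqual and is therefore order-sensitive. One possible order-insensitive variant is sketched here; it is not part of the change, the function name is illustrative, and it assumes types.PluginPrivileges stays a slice of privilege entries as it is in the API types.

package plugin

import (
	"encoding/json"
	"sort"

	"github.com/docker/docker/api/types"
	"github.com/pkg/errors"
)

// validatePrivilegesUnordered is an illustrative, order-insensitive take on
// validatePrivileges: each privilege entry is serialized to JSON, both sets
// are sorted, and then compared element by element.
func validatePrivilegesUnordered(required, granted types.PluginPrivileges) error {
	if len(required) != len(granted) {
		return errors.New("incorrect privileges")
	}
	canon := func(privs types.PluginPrivileges) ([]string, error) {
		out := make([]string, 0, len(privs))
		for _, p := range privs {
			dt, err := json.Marshal(p)
			if err != nil {
				return nil, err
			}
			out = append(out, string(dt))
		}
		sort.Strings(out)
		return out, nil
	}
	r, err := canon(required)
	if err != nil {
		return err
	}
	g, err := canon(granted)
	if err != nil {
		return err
	}
	for i := range r {
		if r[i] != g[i] {
			return errors.New("incorrect privileges")
		}
	}
	return nil
}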

+ 47 - 71
plugin/store/store.go → plugin/store.go

@@ -1,16 +1,15 @@
-package store
+package plugin
 
 
 import (
 import (
-	"encoding/json"
 	"fmt"
 	"fmt"
 	"strings"
 	"strings"
 
 
 	"github.com/Sirupsen/logrus"
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/pkg/plugins"
 	"github.com/docker/docker/pkg/plugins"
 	"github.com/docker/docker/plugin/v2"
 	"github.com/docker/docker/plugin/v2"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/reference"
+	"github.com/pkg/errors"
 )
 )
 
 
 /* allowV1PluginsFallback determines daemon's support for V1 plugins.
 /* allowV1PluginsFallback determines daemon's support for V1 plugins.
@@ -37,33 +36,32 @@ func (name ErrAmbiguous) Error() string {
 	return fmt.Sprintf("multiple plugins found for %q", string(name))
 	return fmt.Sprintf("multiple plugins found for %q", string(name))
 }
 }
 
 
-// GetByName retreives a plugin by name.
-func (ps *Store) GetByName(name string) (*v2.Plugin, error) {
+// GetV2Plugin retrieves a plugin by name, ID, or partial ID.
+func (ps *Store) GetV2Plugin(refOrID string) (*v2.Plugin, error) {
 	ps.RLock()
 	ps.RLock()
 	defer ps.RUnlock()
 	defer ps.RUnlock()
 
 
-	id, nameOk := ps.nameToID[name]
-	if !nameOk {
-		return nil, ErrNotFound(name)
+	id, err := ps.resolvePluginID(refOrID)
+	if err != nil {
+		return nil, err
 	}
 	}
 
 
 	p, idOk := ps.plugins[id]
 	p, idOk := ps.plugins[id]
 	if !idOk {
 	if !idOk {
-		return nil, ErrNotFound(id)
+		return nil, errors.WithStack(ErrNotFound(id))
 	}
 	}
+
 	return p, nil
 	return p, nil
 }
 }
 
 
-// GetByID retreives a plugin by ID.
-func (ps *Store) GetByID(id string) (*v2.Plugin, error) {
-	ps.RLock()
-	defer ps.RUnlock()
-
-	p, idOk := ps.plugins[id]
-	if !idOk {
-		return nil, ErrNotFound(id)
+// validateName returns an error if the name is already reserved. Always call with the lock held and a full name.
+func (ps *Store) validateName(name string) error {
+	for _, p := range ps.plugins {
+		if p.Name() == name {
+			return errors.Errorf("%v already exists", name)
+		}
 	}
 	}
-	return p, nil
+	return nil
 }
 }
 
 
 // GetAll retreives all plugins.
 // GetAll retreives all plugins.
@@ -101,7 +99,6 @@ func (ps *Store) SetState(p *v2.Plugin, state bool) {
 	defer ps.Unlock()
 	defer ps.Unlock()
 
 
 	p.PluginObj.Enabled = state
 	p.PluginObj.Enabled = state
-	ps.updatePluginDB()
 }
 }
 
 
 // Add adds a plugin to memory and plugindb.
 // Add adds a plugin to memory and plugindb.
@@ -113,45 +110,17 @@ func (ps *Store) Add(p *v2.Plugin) error {
 	if v, exist := ps.plugins[p.GetID()]; exist {
 	if v, exist := ps.plugins[p.GetID()]; exist {
 		return fmt.Errorf("plugin %q has the same ID %s as %q", p.Name(), p.GetID(), v.Name())
 		return fmt.Errorf("plugin %q has the same ID %s as %q", p.Name(), p.GetID(), v.Name())
 	}
 	}
-	if _, exist := ps.nameToID[p.Name()]; exist {
-		return fmt.Errorf("plugin %q already exists", p.Name())
-	}
 	ps.plugins[p.GetID()] = p
 	ps.plugins[p.GetID()] = p
-	ps.nameToID[p.Name()] = p.GetID()
-	ps.updatePluginDB()
 	return nil
 	return nil
 }
 }
 
 
-// Update updates a plugin to memory and plugindb.
-func (ps *Store) Update(p *v2.Plugin) {
-	ps.Lock()
-	defer ps.Unlock()
-
-	ps.plugins[p.GetID()] = p
-	ps.nameToID[p.Name()] = p.GetID()
-	ps.updatePluginDB()
-}
-
 // Remove removes a plugin from memory and plugindb.
 // Remove removes a plugin from memory and plugindb.
 func (ps *Store) Remove(p *v2.Plugin) {
 func (ps *Store) Remove(p *v2.Plugin) {
 	ps.Lock()
 	ps.Lock()
 	delete(ps.plugins, p.GetID())
 	delete(ps.plugins, p.GetID())
-	delete(ps.nameToID, p.Name())
-	ps.updatePluginDB()
 	ps.Unlock()
 	ps.Unlock()
 }
 }
 
 
-// Callers are expected to hold the store lock.
-func (ps *Store) updatePluginDB() error {
-	jsonData, err := json.Marshal(ps.plugins)
-	if err != nil {
-		logrus.Debugf("Error in json.Marshal: %v", err)
-		return err
-	}
-	ioutils.AtomicWriteFile(ps.plugindb, jsonData, 0600)
-	return nil
-}
-
 // Get returns an enabled plugin matching the given name and capability.
 // Get returns an enabled plugin matching the given name and capability.
 func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) {
 func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) {
 	var (
 	var (
@@ -161,18 +130,7 @@ func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlug
 
 
 	// Lookup using new model.
 	// Lookup using new model.
 	if ps != nil {
 	if ps != nil {
-		fullName := name
-		if named, err := reference.ParseNamed(fullName); err == nil { // FIXME: validate
-			if reference.IsNameOnly(named) {
-				named = reference.WithDefaultTag(named)
-			}
-			ref, ok := named.(reference.NamedTagged)
-			if !ok {
-				return nil, fmt.Errorf("invalid name: %s", named.String())
-			}
-			fullName = ref.String()
-		}
-		p, err = ps.GetByName(fullName)
+		p, err = ps.GetV2Plugin(name)
 		if err == nil {
 		if err == nil {
 			p.AddRefCount(mode)
 			p.AddRefCount(mode)
 			if p.IsEnabled() {
 			if p.IsEnabled() {
@@ -180,9 +138,9 @@ func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlug
 			}
 			}
 			// Plugin was found but it is disabled, so we should not fall back to legacy plugins
 			// Plugin was found but it is disabled, so we should not fall back to legacy plugins
 			// but we should error out right away
 			// but we should error out right away
-			return nil, ErrNotFound(fullName)
+			return nil, ErrNotFound(name)
 		}
 		}
-		if _, ok := err.(ErrNotFound); !ok {
+		if _, ok := errors.Cause(err).(ErrNotFound); !ok {
 			return nil, err
 			return nil, err
 		}
 		}
 	}
 	}
@@ -259,24 +217,42 @@ func (ps *Store) CallHandler(p *v2.Plugin) {
 	}
 	}
 }
 }
 
 
-// Search retreives a plugin by ID Prefix
-// If no plugin is found, then ErrNotFound is returned
-// If multiple plugins are found, then ErrAmbiguous is returned
-func (ps *Store) Search(partialID string) (*v2.Plugin, error) {
-	ps.RLock()
+func (ps *Store) resolvePluginID(idOrName string) (string, error) {
+	ps.RLock() // todo: fix
 	defer ps.RUnlock()
 	defer ps.RUnlock()
 
 
+	if validFullID.MatchString(idOrName) {
+		return idOrName, nil
+	}
+
+	ref, err := reference.ParseNamed(idOrName)
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to parse %v", idOrName)
+	}
+	if _, ok := ref.(reference.Canonical); ok {
+		logrus.Warnf("canonical references cannot be resolved: %v", ref.String())
+		return "", errors.WithStack(ErrNotFound(idOrName))
+	}
+
+	fullRef := reference.WithDefaultTag(ref)
+
+	for _, p := range ps.plugins {
+		if p.PluginObj.Name == fullRef.String() {
+			return p.PluginObj.ID, nil
+		}
+	}
+
 	var found *v2.Plugin
 	var found *v2.Plugin
-	for id, p := range ps.plugins {
-		if strings.HasPrefix(id, partialID) {
+	for id, p := range ps.plugins { // this can be optimized
+		if strings.HasPrefix(id, idOrName) {
 			if found != nil {
 			if found != nil {
-				return nil, ErrAmbiguous(partialID)
+				return "", errors.WithStack(ErrAmbiguous(idOrName))
 			}
 			}
 			found = p
 			found = p
 		}
 		}
 	}
 	}
 	if found == nil {
 	if found == nil {
-		return nil, ErrNotFound(partialID)
+		return "", errors.WithStack(ErrNotFound(idOrName))
 	}
 	}
-	return found, nil
+	return found.PluginObj.ID, nil
 }
 }
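resolvePluginID above fixes the lookup order for everything that used to go through GetByName, GetByID and Search: a full 64-character hex ID wins, then an exact match on the name with the default tag applied, and finally a unique ID prefix (anything ambiguous returns ErrAmbiguous). The sketch below isolates the name-normalization step using the same reference helpers; the function name is illustrative and it is not part of the change.

package plugin

import (
	"github.com/docker/docker/reference"
	"github.com/pkg/errors"
)

// normalizeName shows why "foo/sample" and "foo/sample:latest" resolve to
// the same plugin: stored names always carry the default tag. Canonical
// (digest) references are rejected, as in resolvePluginID.
func normalizeName(name string) (string, error) {
	ref, err := reference.ParseNamed(name)
	if err != nil {
		return "", errors.Wrapf(err, "failed to parse %v", name)
	}
	if _, ok := ref.(reference.Canonical); ok {
		return "", errors.Errorf("canonical references are not supported: %v", name)
	}
	return reference.WithDefaultTag(ref).String(), nil
}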

+ 3 - 4
plugin/store/store_test.go → plugin/store_test.go

@@ -1,4 +1,4 @@
-package store
+package plugin
 
 
 import (
 import (
 	"testing"
 	"testing"
@@ -8,8 +8,7 @@ import (
 )
 )
 
 
 func TestFilterByCapNeg(t *testing.T) {
 func TestFilterByCapNeg(t *testing.T) {
-	p := v2.NewPlugin("test", "1234567890", "/run/docker", "/var/lib/docker/plugins", "latest")
-
+	p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}}
 	iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"}
 	iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"}
 	i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}}
 	i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}}
 	p.PluginObj.Config.Interface = i
 	p.PluginObj.Config.Interface = i
@@ -21,7 +20,7 @@ func TestFilterByCapNeg(t *testing.T) {
 }
 }
 
 
 func TestFilterByCapPos(t *testing.T) {
 func TestFilterByCapPos(t *testing.T) {
-	p := v2.NewPlugin("test", "1234567890", "/run/docker", "/var/lib/docker/plugins", "latest")
+	p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}}
 
 
 	iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"}
 	iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"}
 	i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}}
 	i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}}

+ 14 - 183
plugin/v2/plugin.go

@@ -1,32 +1,27 @@
 package v2
 package v2
 
 
 import (
 import (
-	"encoding/json"
-	"errors"
 	"fmt"
 	"fmt"
-	"os"
-	"path/filepath"
 	"strings"
 	"strings"
 	"sync"
 	"sync"
 
 
+	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/oci"
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/pkg/plugins"
 	"github.com/docker/docker/pkg/plugins"
-	"github.com/docker/docker/pkg/system"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 )
 
 
 // Plugin represents an individual plugin.
 // Plugin represents an individual plugin.
 type Plugin struct {
 type Plugin struct {
-	mu                sync.RWMutex
-	PluginObj         types.Plugin `json:"plugin"`
-	pClient           *plugins.Client
-	runtimeSourcePath string
-	refCount          int
-	LibRoot           string // TODO: make private
-	PropagatedMount   string // TODO: make private
-	Rootfs            string // TODO: make private
+	mu              sync.RWMutex
+	PluginObj       types.Plugin `json:"plugin"` // todo: embed struct
+	pClient         *plugins.Client
+	refCount        int
+	PropagatedMount string // TODO: make private
+	Rootfs          string // TODO: make private
+
+	Config   digest.Digest
+	Blobsums []digest.Digest
 }
 }
 
 
 const defaultPluginRuntimeDestination = "/run/docker/plugins"
 const defaultPluginRuntimeDestination = "/run/docker/plugins"
@@ -40,33 +35,6 @@ func (e ErrInadequateCapability) Error() string {
 	return fmt.Sprintf("plugin does not provide %q capability", e.cap)
 	return fmt.Sprintf("plugin does not provide %q capability", e.cap)
 }
 }
 
 
-func newPluginObj(name, id, tag string) types.Plugin {
-	return types.Plugin{Name: name, ID: id, Tag: tag}
-}
-
-// NewPlugin creates a plugin.
-func NewPlugin(name, id, runRoot, libRoot, tag string) *Plugin {
-	return &Plugin{
-		PluginObj:         newPluginObj(name, id, tag),
-		runtimeSourcePath: filepath.Join(runRoot, id),
-		LibRoot:           libRoot,
-	}
-}
-
-// Restore restores the plugin
-func (p *Plugin) Restore(runRoot string) {
-	p.runtimeSourcePath = filepath.Join(runRoot, p.GetID())
-}
-
-// GetRuntimeSourcePath gets the Source (host) path of the plugin socket
-// This path gets bind mounted into the plugin.
-func (p *Plugin) GetRuntimeSourcePath() string {
-	p.mu.RLock()
-	defer p.mu.RUnlock()
-
-	return p.runtimeSourcePath
-}
-
 // BasePath returns the path to which all paths returned by the plugin are relative to.
 // BasePath returns the path to which all paths returned by the plugin are relative to.
 // For Plugin objects this returns the host path of the plugin container's rootfs.
 // For Plugin objects this returns the host path of the plugin container's rootfs.
 func (p *Plugin) BasePath() string {
 func (p *Plugin) BasePath() string {
@@ -96,12 +64,7 @@ func (p *Plugin) IsV1() bool {
 
 
 // Name returns the plugin name.
 // Name returns the plugin name.
 func (p *Plugin) Name() string {
 func (p *Plugin) Name() string {
-	name := p.PluginObj.Name
-	if len(p.PluginObj.Tag) > 0 {
-		// TODO: this feels hacky, maybe we should be storing the distribution reference rather than splitting these
-		name += ":" + p.PluginObj.Tag
-	}
-	return name
+	return p.PluginObj.Name
 }
 }
 
 
 // FilterByCap query the plugin for a given capability.
 // FilterByCap query the plugin for a given capability.
@@ -115,23 +78,8 @@ func (p *Plugin) FilterByCap(capability string) (*Plugin, error) {
 	return nil, ErrInadequateCapability{capability}
 	return nil, ErrInadequateCapability{capability}
 }
 }
 
 
-// RemoveFromDisk deletes the plugin's runtime files from disk.
-func (p *Plugin) RemoveFromDisk() error {
-	return os.RemoveAll(p.runtimeSourcePath)
-}
-
-// InitPlugin populates the plugin object from the plugin config file.
-func (p *Plugin) InitPlugin() error {
-	dt, err := os.Open(filepath.Join(p.LibRoot, p.PluginObj.ID, "config.json"))
-	if err != nil {
-		return err
-	}
-	err = json.NewDecoder(dt).Decode(&p.PluginObj.Config)
-	dt.Close()
-	if err != nil {
-		return err
-	}
-
+// InitEmptySettings initializes empty settings for a plugin.
+func (p *Plugin) InitEmptySettings() {
 	p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts))
 	p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts))
 	copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts)
 	copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts)
 	p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices))
 	p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices))
@@ -144,18 +92,6 @@ func (p *Plugin) InitPlugin() error {
 	}
 	}
 	p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value))
 	p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value))
 	copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value)
 	copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value)
-
-	return p.writeSettings()
-}
-
-func (p *Plugin) writeSettings() error {
-	f, err := os.Create(filepath.Join(p.LibRoot, p.PluginObj.ID, "plugin-settings.json"))
-	if err != nil {
-		return err
-	}
-	err = json.NewEncoder(f).Encode(&p.PluginObj.Settings)
-	f.Close()
-	return err
 }
 
 // Set is used to pass arguments to the plugin.
@@ -243,8 +179,7 @@ next:
 		return fmt.Errorf("setting %q not found in the plugin configuration", s.name)
 	}
 
-	// update the settings on disk
-	return p.writeSettings()
+	return nil
 }
 
 // IsEnabled returns the active state of the plugin.
@@ -307,107 +242,3 @@ func (p *Plugin) Acquire() {
 func (p *Plugin) Release() {
 	p.AddRefCount(plugingetter.RELEASE)
 }
-
-// InitSpec creates an OCI spec from the plugin's config.
-func (p *Plugin) InitSpec(s specs.Spec) (*specs.Spec, error) {
-	s.Root = specs.Root{
-		Path:     p.Rootfs,
-		Readonly: false, // TODO: all plugins should be readonly? settable in config?
-	}
-
-	userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts))
-	for _, m := range p.PluginObj.Settings.Mounts {
-		userMounts[m.Destination] = struct{}{}
-	}
-
-	if err := os.MkdirAll(p.runtimeSourcePath, 0755); err != nil {
-		return nil, err
-	}
-
-	mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{
-		Source:      &p.runtimeSourcePath,
-		Destination: defaultPluginRuntimeDestination,
-		Type:        "bind",
-		Options:     []string{"rbind", "rshared"},
-	})
-
-	if p.PluginObj.Config.Network.Type != "" {
-		// TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize)
-		if p.PluginObj.Config.Network.Type == "host" {
-			oci.RemoveNamespace(&s, specs.NamespaceType("network"))
-		}
-		etcHosts := "/etc/hosts"
-		resolvConf := "/etc/resolv.conf"
-		mounts = append(mounts,
-			types.PluginMount{
-				Source:      &etcHosts,
-				Destination: etcHosts,
-				Type:        "bind",
-				Options:     []string{"rbind", "ro"},
-			},
-			types.PluginMount{
-				Source:      &resolvConf,
-				Destination: resolvConf,
-				Type:        "bind",
-				Options:     []string{"rbind", "ro"},
-			})
-	}
-
-	for _, mnt := range mounts {
-		m := specs.Mount{
-			Destination: mnt.Destination,
-			Type:        mnt.Type,
-			Options:     mnt.Options,
-		}
-		if mnt.Source == nil {
-			return nil, errors.New("mount source is not specified")
-		}
-		m.Source = *mnt.Source
-		s.Mounts = append(s.Mounts, m)
-	}
-
-	for i, m := range s.Mounts {
-		if strings.HasPrefix(m.Destination, "/dev/") {
-			if _, ok := userMounts[m.Destination]; ok {
-				s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...)
-			}
-		}
-	}
-
-	if p.PluginObj.Config.PropagatedMount != "" {
-		p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount)
-		s.Linux.RootfsPropagation = "rshared"
-	}
-
-	if p.PluginObj.Config.Linux.DeviceCreation {
-		rwm := "rwm"
-		s.Linux.Resources.Devices = []specs.DeviceCgroup{{Allow: true, Access: &rwm}}
-	}
-	for _, dev := range p.PluginObj.Settings.Devices {
-		path := *dev.Path
-		d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm")
-		if err != nil {
-			return nil, err
-		}
-		s.Linux.Devices = append(s.Linux.Devices, d...)
-		s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...)
-	}
-
-	envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1)
-	envs[0] = "PATH=" + system.DefaultPathEnv
-	envs = append(envs, p.PluginObj.Settings.Env...)
-
-	args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...)
-	cwd := p.PluginObj.Config.Workdir
-	if len(cwd) == 0 {
-		cwd = "/"
-	}
-	s.Process.Terminal = false
-	s.Process.Args = args
-	s.Process.Cwd = cwd
-	s.Process.Env = envs
-
-	s.Process.Capabilities = append(s.Process.Capabilities, p.PluginObj.Config.Linux.Capabilities...)
-
-	return &s, nil
-}

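With InitPlugin and writeSettings removed, config decoding and settings persistence now live with the caller rather than the Plugin type itself. A minimal sketch of that flow against the new API; the package name, helper name and raw-config argument below are illustrative only, not part of the change:

package example

import (
	"encoding/json"

	"github.com/docker/docker/plugin/v2"
)

// newPluginFromConfig sketches the new split of responsibilities: the caller
// decodes config.json itself (persistence is handled elsewhere in this PR)
// and then seeds empty settings, instead of the old InitPlugin doing both and
// writing plugin-settings.json to disk.
func newPluginFromConfig(rawConfig []byte) (*v2.Plugin, error) {
	p := &v2.Plugin{}
	if err := json.Unmarshal(rawConfig, &p.PluginObj.Config); err != nil {
		return nil, err
	}
	p.InitEmptySettings()
	return p, nil
}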
+ 121 - 0
plugin/v2/plugin_linux.go

@@ -0,0 +1,121 @@
+// +build linux
+
+package v2
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/oci"
+	"github.com/docker/docker/pkg/system"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// InitSpec creates an OCI spec from the plugin's config.
+func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) {
+	s := oci.DefaultSpec()
+	s.Root = specs.Root{
+		Path:     p.Rootfs,
+		Readonly: false, // TODO: all plugins should be readonly? settable in config?
+	}
+
+	userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts))
+	for _, m := range p.PluginObj.Settings.Mounts {
+		userMounts[m.Destination] = struct{}{}
+	}
+
+	execRoot = filepath.Join(execRoot, p.PluginObj.ID)
+	if err := os.MkdirAll(execRoot, 0700); err != nil {
+		return nil, err
+	}
+
+	mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{
+		Source:      &execRoot,
+		Destination: defaultPluginRuntimeDestination,
+		Type:        "bind",
+		Options:     []string{"rbind", "rshared"},
+	})
+
+	if p.PluginObj.Config.Network.Type != "" {
+		// TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize)
+		if p.PluginObj.Config.Network.Type == "host" {
+			oci.RemoveNamespace(&s, specs.NamespaceType("network"))
+		}
+		etcHosts := "/etc/hosts"
+		resolvConf := "/etc/resolv.conf"
+		mounts = append(mounts,
+			types.PluginMount{
+				Source:      &etcHosts,
+				Destination: etcHosts,
+				Type:        "bind",
+				Options:     []string{"rbind", "ro"},
+			},
+			types.PluginMount{
+				Source:      &resolvConf,
+				Destination: resolvConf,
+				Type:        "bind",
+				Options:     []string{"rbind", "ro"},
+			})
+	}
+
+	for _, mnt := range mounts {
+		m := specs.Mount{
+			Destination: mnt.Destination,
+			Type:        mnt.Type,
+			Options:     mnt.Options,
+		}
+		if mnt.Source == nil {
+			return nil, errors.New("mount source is not specified")
+		}
+		m.Source = *mnt.Source
+		s.Mounts = append(s.Mounts, m)
+	}
+
+	for i, m := range s.Mounts {
+		if strings.HasPrefix(m.Destination, "/dev/") {
+			if _, ok := userMounts[m.Destination]; ok {
+				s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...)
+			}
+		}
+	}
+
+	if p.PluginObj.Config.PropagatedMount != "" {
+		p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount)
+		s.Linux.RootfsPropagation = "rshared"
+	}
+
+	if p.PluginObj.Config.Linux.DeviceCreation {
+		rwm := "rwm"
+		s.Linux.Resources.Devices = []specs.DeviceCgroup{{Allow: true, Access: &rwm}}
+	}
+	for _, dev := range p.PluginObj.Settings.Devices {
+		path := *dev.Path
+		d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm")
+		if err != nil {
+			return nil, err
+		}
+		s.Linux.Devices = append(s.Linux.Devices, d...)
+		s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...)
+	}
+
+	envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1)
+	envs[0] = "PATH=" + system.DefaultPathEnv
+	envs = append(envs, p.PluginObj.Settings.Env...)
+
+	args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...)
+	cwd := p.PluginObj.Config.WorkDir
+	if len(cwd) == 0 {
+		cwd = "/"
+	}
+	s.Process.Terminal = false
+	s.Process.Args = args
+	s.Process.Cwd = cwd
+	s.Process.Env = envs
+
+	s.Process.Capabilities = append(s.Process.Capabilities, p.PluginObj.Config.Linux.Capabilities...)
+
+	return &s, nil
+}

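InitSpec now builds on oci.DefaultSpec() itself and takes the daemon's exec root instead of a pre-built spec. A rough, Linux-only usage sketch; the bare Plugin value and the /run/docker/plugins path are placeholders, since the real caller is the plugin manager with a fully populated PluginObj:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/plugin/v2"
)

func main() {
	// Placeholder plugin; in the daemon this comes from the plugin store and
	// has Rootfs, Config and Settings filled in before InitSpec is called.
	p := &v2.Plugin{}

	spec, err := p.InitSpec("/run/docker/plugins") // assumed exec root
	if err != nil {
		fmt.Println("init spec:", err)
		return
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out)) // OCI spec the manager hands to the runtime
}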
+ 14 - 0
plugin/v2/plugin_unsupported.go

@@ -0,0 +1,14 @@
+// +build !linux
+
+package v2
+
+import (
+	"errors"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// InitSpec creates an OCI spec from the plugin's config.
+func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) {
+	return nil, errors.New("not supported")
+}

+ 1 - 1
vendor.conf

@@ -44,7 +44,7 @@ github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
 github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7
 
 # get graph and distribution packages
-github.com/docker/distribution a6bf3dd064f15598166bca2d66a9962a9555139e
+github.com/docker/distribution 28602af35aceda2f8d571bad7ca37a54cf0250bc
 github.com/vbatts/tar-split v0.10.1
 
 # get go-zfs packages

+ 5 - 0
vendor/github.com/docker/distribution/digest/digest.go

@@ -80,6 +80,11 @@ func FromBytes(p []byte) Digest {
 	return Canonical.FromBytes(p)
 }
 
+// FromString digests the input and returns a Digest.
+func FromString(s string) Digest {
+	return Canonical.FromString(s)
+}
+
 // Validate checks that the contents of d is a valid digest, returning an
 // error if not.
 func (d Digest) Validate() error {

+ 5 - 0
vendor/github.com/docker/distribution/digest/digester.go

@@ -129,6 +129,11 @@ func (a Algorithm) FromBytes(p []byte) Digest {
 	return digester.Digest()
 }
 
+// FromString digests the string input and returns a Digest.
+func (a Algorithm) FromString(s string) Digest {
+	return a.FromBytes([]byte(s))
+}
+
 // TODO(stevvooe): Allow resolution of verifiers using the digest type and
 // this registration system.
 

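The two vendored additions expose FromString at both the package and the Algorithm level. A quick illustration of the helpers; the input string is just an example:

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	// Package-level FromString uses the canonical algorithm (sha256).
	d := digest.FromString("some plugin metadata")
	fmt.Println(d, d.Validate()) // sha256:<hex> <nil>

	// Algorithm.FromString is the per-algorithm form of the same helper.
	fmt.Println(digest.Canonical.FromString("some plugin metadata") == d) // true
}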
+ 7 - 2
vendor/github.com/docker/distribution/manifest/schema1/config_builder.go

@@ -240,8 +240,13 @@ func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, e
 
 // AppendReference adds a reference to the current ManifestBuilder
 func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error {
-	// todo: verification here?
-	mb.descriptors = append(mb.descriptors, d.Descriptor())
+	descriptor := d.Descriptor()
+
+	if err := descriptor.Digest.Validate(); err != nil {
+		return err
+	}
+
+	mb.descriptors = append(mb.descriptors, descriptor)
 	return nil
 }
 

+ 17 - 13
vendor/github.com/docker/distribution/manifest/schema2/builder.go

@@ -11,21 +11,25 @@ type builder struct {
 	// bs is a BlobService used to publish the configuration blob.
 	bs distribution.BlobService
 
+	// configMediaType is media type used to describe configuration
+	configMediaType string
+
 	// configJSON references
 	configJSON []byte
 
-	// layers is a list of layer descriptors that gets built by successive
-	// calls to AppendReference.
-	layers []distribution.Descriptor
+	// dependencies is a list of descriptors that gets built by successive
+	// calls to AppendReference. In case of image configuration these are layers.
+	dependencies []distribution.Descriptor
 }
 
 // NewManifestBuilder is used to build new manifests for the current schema
 // version. It takes a BlobService so it can publish the configuration blob
 // as part of the Build process.
-func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder {
+func NewManifestBuilder(bs distribution.BlobService, configMediaType string, configJSON []byte) distribution.ManifestBuilder {
 	mb := &builder{
-		bs:         bs,
-		configJSON: make([]byte, len(configJSON)),
+		bs:              bs,
+		configMediaType: configMediaType,
+		configJSON:      make([]byte, len(configJSON)),
 	}
 	copy(mb.configJSON, configJSON)
 
@@ -36,9 +40,9 @@ func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribu
 func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
 	m := Manifest{
 		Versioned: SchemaVersion,
-		Layers:    make([]distribution.Descriptor, len(mb.layers)),
+		Layers:    make([]distribution.Descriptor, len(mb.dependencies)),
 	}
-	copy(m.Layers, mb.layers)
+	copy(m.Layers, mb.dependencies)
 
 	configDigest := digest.FromBytes(mb.configJSON)
 
@@ -48,7 +52,7 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
 	case nil:
 		// Override MediaType, since Put always replaces the specified media
 		// type with application/octet-stream in the descriptor it returns.
-		m.Config.MediaType = MediaTypeConfig
+		m.Config.MediaType = mb.configMediaType
 		return FromStruct(m)
 	case distribution.ErrBlobUnknown:
 		// nop
@@ -57,10 +61,10 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
 	}
 
 	// Add config to the blob store
-	m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON)
+	m.Config, err = mb.bs.Put(ctx, mb.configMediaType, mb.configJSON)
 	// Override MediaType, since Put always replaces the specified media
 	// type with application/octet-stream in the descriptor it returns.
-	m.Config.MediaType = MediaTypeConfig
+	m.Config.MediaType = mb.configMediaType
 	if err != nil {
 		return nil, err
 	}
@@ -70,11 +74,11 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
 
 // AppendReference adds a reference to the current ManifestBuilder.
 func (mb *builder) AppendReference(d distribution.Describable) error {
-	mb.layers = append(mb.layers, d.Descriptor())
+	mb.dependencies = append(mb.dependencies, d.Descriptor())
 	return nil
 }
 
 // References returns the current references added to this builder.
 func (mb *builder) References() []distribution.Descriptor {
-	return mb.layers
+	return mb.dependencies
 }

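NewManifestBuilder now takes the config media type, which is what allows building a schema2 manifest around a plugin config instead of an image config. A sketch of a caller under that assumption; the function name and its parameters are illustrative, with the blob service and layer descriptors assumed to come from the push path:

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/schema2"
)

// buildPluginManifest shows the new configMediaType parameter in use.
func buildPluginManifest(ctx context.Context, bs distribution.BlobService, configJSON []byte, layers []distribution.Descriptor) (distribution.Manifest, error) {
	b := schema2.NewManifestBuilder(bs, schema2.MediaTypePluginConfig, configJSON)
	for _, l := range layers {
		// distribution.Descriptor satisfies Describable, so it can be appended directly.
		if err := b.AppendReference(l); err != nil {
			return nil, err
		}
	}
	return b.Build(ctx)
}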
+ 6 - 2
vendor/github.com/docker/distribution/manifest/schema2/manifest.go

@@ -14,8 +14,8 @@ const (
 	// MediaTypeManifest specifies the mediaType for the current version.
 	MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
 
-	// MediaTypeConfig specifies the mediaType for the image configuration.
-	MediaTypeConfig = "application/vnd.docker.container.image.v1+json"
+	// MediaTypeImageConfig specifies the mediaType for the image configuration.
+	MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json"
 
 	// MediaTypePluginConfig specifies the mediaType for plugin configuration.
 	MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json"
@@ -27,6 +27,10 @@ const (
 	// MediaTypeForeignLayer is the mediaType used for layers that must be
 	// downloaded from foreign URLs.
 	MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+
+	// MediaTypeUncompressedLayer is the mediaType used for layers which
+	// are not compressed.
+	MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
 )
 
 var (

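Alongside the MediaTypeConfig to MediaTypeImageConfig rename, a media type for uncompressed layers is added. A hypothetical descriptor for such a layer, using only the exported fields of distribution.Descriptor; the helper name and layerTar argument are illustrative:

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema2"
)

// uncompressedLayerDescriptor sketches describing a plain (non-gzipped) tar
// blob with the new media type.
func uncompressedLayerDescriptor(layerTar []byte) distribution.Descriptor {
	return distribution.Descriptor{
		MediaType: schema2.MediaTypeUncompressedLayer,
		Digest:    digest.FromBytes(layerTar),
		Size:      int64(len(layerTar)),
	}
}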
+ 3 - 1
vendor/github.com/docker/distribution/registry/client/auth/session.go

@@ -155,7 +155,9 @@ type RepositoryScope struct {
 // using the scope grammar
 func (rs RepositoryScope) String() string {
 	repoType := "repository"
-	if rs.Class != "" {
+	// Keep existing format for image class to maintain backwards compatibility
+	// with authorization servers which do not support the expanded grammar.
+	if rs.Class != "" && rs.Class != "image" {
 		repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class)
 	}
 	return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ","))

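The scope grammar is only expanded for non-image classes, so existing authorization servers keep receiving the legacy format. For example, with the fields shown in the vendored type (repository names here are just examples):

package main

import (
	"fmt"

	"github.com/docker/distribution/registry/client/auth"
)

func main() {
	img := auth.RepositoryScope{Repository: "library/busybox", Class: "image", Actions: []string{"pull"}}
	plg := auth.RepositoryScope{Repository: "vieux/sshfs", Class: "plugin", Actions: []string{"pull", "push"}}
	fmt.Println(img.String()) // repository:library/busybox:pull  (legacy form kept)
	fmt.Println(plg.String()) // repository(plugin):vieux/sshfs:pull,push
}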
+ 23 - 17
volume/drivers/extpoint.go

@@ -111,23 +111,25 @@ func lookup(name string, mode int) (volume.Driver, error) {
 	if ok {
 		return ext, nil
 	}
+	if drivers.plugingetter != nil {
+		p, err := drivers.plugingetter.Get(name, extName, mode)
+		if err != nil {
+			return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err)
+		}
 
-	p, err := drivers.plugingetter.Get(name, extName, mode)
-	if err != nil {
-		return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err)
-	}
-
-	d := NewVolumeDriver(p.Name(), p.BasePath(), p.Client())
-	if err := validateDriver(d); err != nil {
-		return nil, err
-	}
+		d := NewVolumeDriver(p.Name(), p.BasePath(), p.Client())
+		if err := validateDriver(d); err != nil {
+			return nil, err
+		}
 
-	if p.IsV1() {
-		drivers.Lock()
-		drivers.extensions[name] = d
-		drivers.Unlock()
+		if p.IsV1() {
+			drivers.Lock()
+			drivers.extensions[name] = d
+			drivers.Unlock()
+		}
+		return d, nil
 	}
-	return d, nil
+	return nil, fmt.Errorf("Error looking up volume plugin %s", name)
 }
 
 func validateDriver(vd volume.Driver) error {
@@ -179,9 +181,13 @@ func GetDriverList() []string {
 
 // GetAllDrivers lists all the registered drivers
 func GetAllDrivers() ([]volume.Driver, error) {
-	plugins, err := drivers.plugingetter.GetAllByCap(extName)
-	if err != nil {
-		return nil, fmt.Errorf("error listing plugins: %v", err)
+	var plugins []getter.CompatPlugin
+	if drivers.plugingetter != nil {
+		var err error
+		plugins, err = drivers.plugingetter.GetAllByCap(extName)
+		if err != nil {
+			return nil, fmt.Errorf("error listing plugins: %v", err)
+		}
 	}
 	var ds []volume.Driver
 

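lookup and GetAllDrivers now tolerate a nil plugin getter rather than dereferencing it, which is what lets the tests below drop their plugin-store setup. A sketch of a test that relies on that behaviour; the test name and assertion are illustrative and assume the volumedrivers package as above:

package volumedrivers

import "testing"

// TestGetAllDriversWithoutPluginGetter sketches what the nil-check enables:
// with no plugin getter registered, only in-process drivers are returned and
// no nil dereference occurs.
func TestGetAllDriversWithoutPluginGetter(t *testing.T) {
	if _, err := GetAllDrivers(); err != nil {
		t.Fatalf("expected no error without a plugin getter, got %v", err)
	}
}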
+ 0 - 4
volume/drivers/extpoint_test.go

@@ -3,14 +3,10 @@ package volumedrivers
 import (
 	"testing"
 
-	pluginstore "github.com/docker/docker/plugin/store"
 	volumetestutils "github.com/docker/docker/volume/testutils"
 )
 
 func TestGetDriver(t *testing.T) {
-	pluginStore := pluginstore.NewStore("/var/lib/docker")
-	RegisterPluginGetter(pluginStore)
-
 	_, err := GetDriver("missing")
 	if err == nil {
 		t.Fatal("Expected error, was nil")

+ 0 - 4
volume/store/store_test.go

@@ -7,15 +7,11 @@ import (
 	"strings"
 	"strings"
 	"testing"
 	"testing"
 
 
-	pluginstore "github.com/docker/docker/plugin/store"
 	"github.com/docker/docker/volume/drivers"
 	"github.com/docker/docker/volume/drivers"
 	volumetestutils "github.com/docker/docker/volume/testutils"
 	volumetestutils "github.com/docker/docker/volume/testutils"
 )
 )
 
 
 func TestCreate(t *testing.T) {
 func TestCreate(t *testing.T) {
-	pluginStore := pluginstore.NewStore("/var/lib/docker")
-	volumedrivers.RegisterPluginGetter(pluginStore)
-
 	volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake")
 	defer volumedrivers.Unregister("fake")
 	dir, err := ioutil.TempDir("", "test-create")