package plugin // import "github.com/docker/docker/plugin"

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"context"
	"encoding/json"
	"io"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/containerd/log"
	"github.com/distribution/reference"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/pkg/authorization"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/containerfs"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	v2 "github.com/docker/docker/plugin/v2"
	"github.com/moby/sys/mount"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)
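
// acceptedPluginFilterTags lists the filter keys accepted by List; any other
// filter key is rejected during validation.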
var acceptedPluginFilterTags = map[string]bool{
	"enabled":    true,
	"capability": true,
}

// Disable deactivates a plugin. This means resources (volumes, networks) can't use it.
func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error {
	p, err := pm.config.Store.GetV2Plugin(refOrID)
	if err != nil {
		return err
	}
	pm.mu.RLock()
	c := pm.cMap[p]
	pm.mu.RUnlock()

	if !config.ForceDisable && p.GetRefCount() > 0 {
		return errors.WithStack(inUseError(p.Name()))
	}

	for _, typ := range p.GetTypes() {
		if typ.Capability == authorization.AuthZApiImplements {
			pm.config.AuthzMiddleware.RemovePlugin(p.Name())
		}
	}

	if err := pm.disable(p, c); err != nil {
		return err
	}
	pm.publisher.Publish(EventDisable{Plugin: p.PluginObj})
	pm.config.LogPluginEvent(p.GetID(), refOrID, events.ActionDisable)
	return nil
}

// Enable activates a plugin, which implies that it is ready to be used by containers.
func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error {
	p, err := pm.config.Store.GetV2Plugin(refOrID)
	if err != nil {
		return err
	}

	c := &controller{timeoutInSecs: config.Timeout}
	if err := pm.enable(p, c, false); err != nil {
		return err
	}
	pm.publisher.Publish(EventEnable{Plugin: p.PluginObj})
	pm.config.LogPluginEvent(p.GetID(), refOrID, events.ActionEnable)
	return nil
}

// Inspect examines a plugin config
func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) {
	p, err := pm.config.Store.GetV2Plugin(refOrID)
	if err != nil {
		return nil, err
	}

	return &p.PluginObj, nil
}
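
// computePrivileges derives the set of privileges a plugin requires from its
// config: network access, host IPC/PID namespaces, host mounts, host devices,
// and additional Linux capabilities.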
func computePrivileges(c types.PluginConfig) types.PluginPrivileges {
	var privileges types.PluginPrivileges
	if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "network",
			Description: "permissions to access a network",
			Value:       []string{c.Network.Type},
		})
	}
	if c.IpcHost {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "host ipc namespace",
			Description: "allow access to host ipc namespace",
			Value:       []string{"true"},
		})
	}
	if c.PidHost {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "host pid namespace",
			Description: "allow access to host pid namespace",
			Value:       []string{"true"},
		})
	}
	for _, mnt := range c.Mounts {
		if mnt.Source != nil {
			privileges = append(privileges, types.PluginPrivilege{
				Name:        "mount",
				Description: "host path to mount",
				Value:       []string{*mnt.Source},
			})
		}
	}
	for _, device := range c.Linux.Devices {
		if device.Path != nil {
			privileges = append(privileges, types.PluginPrivilege{
				Name:        "device",
				Description: "host device to access",
				Value:       []string{*device.Path},
			})
		}
	}
	if c.Linux.AllowAllDevices {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "allow-all-devices",
			Description: "allow 'rwm' access to all devices",
			Value:       []string{"true"},
		})
	}
	if len(c.Linux.Capabilities) > 0 {
		privileges = append(privileges, types.PluginPrivilege{
			Name:        "capabilities",
			Description: "list of additional capabilities required",
			Value:       c.Linux.Capabilities,
		})
	}

	return privileges
}

// Privileges pulls a plugin config and computes the privileges required to install it.
func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *registry.AuthConfig) (types.PluginPrivileges, error) {
	var (
		config     types.PluginConfig
		configSeen bool
	)
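
	// h walks the fetched descriptors: for a manifest it returns the config
	// descriptor as its only child, and for the plugin config blob it decodes
	// the config and records that it was seen.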
	h := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		switch desc.MediaType {
		case schema2.MediaTypeManifest, ocispec.MediaTypeImageManifest:
			data, err := content.ReadBlob(ctx, pm.blobStore, desc)
			if err != nil {
				return nil, errors.Wrapf(err, "error reading image manifest from blob store for %s", ref)
			}

			var m ocispec.Manifest
			if err := json.Unmarshal(data, &m); err != nil {
				return nil, errors.Wrapf(err, "error unmarshaling image manifest for %s", ref)
			}
			return []ocispec.Descriptor{m.Config}, nil
		case schema2.MediaTypePluginConfig:
			configSeen = true
			data, err := content.ReadBlob(ctx, pm.blobStore, desc)
			if err != nil {
				return nil, errors.Wrapf(err, "error reading plugin config from blob store for %s", ref)
			}

			if err := json.Unmarshal(data, &config); err != nil {
				return nil, errors.Wrapf(err, "error unmarshaling plugin config for %s", ref)
			}
		}

		return nil, nil
	}

	if err := pm.fetch(ctx, ref, authConfig, progress.DiscardOutput(), metaHeader, images.HandlerFunc(h)); err != nil {
		return types.PluginPrivileges{}, err
	}

	if !configSeen {
		return types.PluginPrivileges{}, errors.Errorf("did not find plugin config for specified reference %s", ref)
	}

	return computePrivileges(config), nil
}

// Upgrade upgrades a plugin
//
// TODO: replace reference package usage with simpler url.Parse semantics
func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *registry.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}

	if p.IsEnabled() {
		return errors.Wrap(enabledError(p.Name()), "plugin must be disabled before upgrading")
	}

	// revalidate because Pull is public
	if _, err := reference.ParseNormalizedNamed(name); err != nil {
		return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse %q", name)
	}

	pm.muGC.RLock()
	defer pm.muGC.RUnlock()

	tmpRootFSDir, err := os.MkdirTemp(pm.tmpDir(), ".rootfs")
	if err != nil {
		return errors.Wrap(err, "error creating tmp dir for plugin rootfs")
	}

	var md fetchMeta

	ctx, cancel := context.WithCancel(ctx)
	out, waitProgress := setupProgressOutput(outStream, cancel)
	defer waitProgress()

	if err := pm.fetch(ctx, ref, authConfig, out, metaHeader, storeFetchMetadata(&md), childrenHandler(pm.blobStore), applyLayer(pm.blobStore, tmpRootFSDir, out)); err != nil {
		return err
	}
	pm.config.LogPluginEvent(reference.FamiliarString(ref), name, events.ActionPull)

	if err := validateFetchedMetadata(md); err != nil {
		return err
	}

	if err := pm.upgradePlugin(p, md.config, md.manifest, md.blobs, tmpRootFSDir, &privileges); err != nil {
		return err
	}
	p.PluginObj.PluginReference = ref.String()
	return nil
}

// Pull pulls a plugin, checks that the correct privileges were provided, and installs the plugin.
//
// TODO: replace reference package usage with simpler url.Parse semantics
func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *registry.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer, opts ...CreateOpt) (err error) {
	pm.muGC.RLock()
	defer pm.muGC.RUnlock()

	// revalidate because Pull is public
	nameref, err := reference.ParseNormalizedNamed(name)
	if err != nil {
		return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse %q", name)
	}
	name = reference.FamiliarString(reference.TagNameOnly(nameref))

	if err := pm.config.Store.validateName(name); err != nil {
		return errdefs.InvalidParameter(err)
	}

	tmpRootFSDir, err := os.MkdirTemp(pm.tmpDir(), ".rootfs")
	if err != nil {
		return errors.Wrap(errdefs.System(err), "error preparing pull")
	}
	defer os.RemoveAll(tmpRootFSDir)

	var md fetchMeta

	ctx, cancel := context.WithCancel(ctx)
	out, waitProgress := setupProgressOutput(outStream, cancel)
	defer waitProgress()

	if err := pm.fetch(ctx, ref, authConfig, out, metaHeader, storeFetchMetadata(&md), childrenHandler(pm.blobStore), applyLayer(pm.blobStore, tmpRootFSDir, out)); err != nil {
		return err
	}
	pm.config.LogPluginEvent(reference.FamiliarString(ref), name, events.ActionPull)

	if err := validateFetchedMetadata(md); err != nil {
		return err
	}

	refOpt := func(p *v2.Plugin) {
		p.PluginObj.PluginReference = ref.String()
	}
	optsList := make([]CreateOpt, 0, len(opts)+1)
	optsList = append(optsList, opts...)
	optsList = append(optsList, refOpt)

	// TODO: tmpRootFSDir is empty but should have layers in it
	p, err := pm.createPlugin(name, md.config, md.manifest, md.blobs, tmpRootFSDir, &privileges, optsList...)
	if err != nil {
		return err
	}

	pm.publisher.Publish(EventCreate{Plugin: p.PluginObj})

	return nil
}

// List displays the list of plugins and associated metadata.
func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) {
	if err := pluginFilters.Validate(acceptedPluginFilterTags); err != nil {
		return nil, err
	}

	enabledOnly := false
	disabledOnly := false
	if pluginFilters.Contains("enabled") {
		enabledFilter, err := pluginFilters.GetBoolOrDefault("enabled", false)
		if err != nil {
			return nil, err
		}

		if enabledFilter {
			enabledOnly = true
		} else {
			disabledOnly = true
		}
	}

	plugins := pm.config.Store.GetAll()
	out := make([]types.Plugin, 0, len(plugins))
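
	// The "next" label lets the capability check below skip to the next plugin
	// from inside the inner loop over this plugin's types.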
next:
	for _, p := range plugins {
		if enabledOnly && !p.PluginObj.Enabled {
			continue
		}
		if disabledOnly && p.PluginObj.Enabled {
			continue
		}
		if pluginFilters.Contains("capability") {
			for _, f := range p.GetTypes() {
				if !pluginFilters.Match("capability", f.Capability) {
					continue next
				}
			}
		}
		out = append(out, p.PluginObj)
	}
	return out, nil
}

// Push pushes a plugin to the registry.
func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *registry.AuthConfig, outStream io.Writer) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}

	ref, err := reference.ParseNormalizedNamed(p.Name())
	if err != nil {
		return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name())
	}

	statusTracker := docker.NewInMemoryTracker()

	resolver, err := pm.newResolver(ctx, statusTracker, authConfig, metaHeader, false)
	if err != nil {
		return err
	}

	pusher, err := resolver.Pusher(ctx, ref.String())
	if err != nil {
		return errors.Wrap(err, "error creating plugin pusher")
	}

	pj := newPushJobs(statusTracker)

	ctx, cancel := context.WithCancel(ctx)
	out, waitProgress := setupProgressOutput(outStream, cancel)
	defer waitProgress()

	progressHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		log.G(ctx).WithField("mediaType", desc.MediaType).WithField("digest", desc.Digest.String()).Debug("Preparing to push plugin layer")
		id := stringid.TruncateID(desc.Digest.String())
		pj.add(remotes.MakeRefKey(ctx, desc), id)
		progress.Update(out, id, "Preparing")
		return nil, nil
	})

	desc, err := pm.getManifestDescriptor(ctx, p)
	if err != nil {
		return errors.Wrap(err, "error reading plugin manifest")
	}

	progress.Messagef(out, "", "The push refers to repository [%s]", reference.FamiliarName(ref))

	// TODO: If a layer already exists on the registry, the progress output just says "Preparing"
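	// Poll the push status tracker roughly every 100ms and render per-blob
	// progress until the push finishes or the context is cancelled.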
	go func() {
		timer := time.NewTimer(100 * time.Millisecond)
		defer timer.Stop()
		if !timer.Stop() {
			<-timer.C
		}
		var statuses []contentStatus
		for {
			timer.Reset(100 * time.Millisecond)
			select {
			case <-ctx.Done():
				return
			case <-timer.C:
				statuses = pj.status()
			}

			for _, s := range statuses {
				out.WriteProgress(progress.Progress{ID: s.Ref, Current: s.Offset, Total: s.Total, Action: s.Status, LastUpdate: s.Offset == s.Total})
			}
		}
	}()

	// Make sure we can authenticate the request since the auth scope for plugin repos is different from that of a normal repo.
	ctx = docker.WithScope(ctx, scope(ref, true))
	if err := remotes.PushContent(ctx, pusher, desc, pm.blobStore, nil, nil, func(h images.Handler) images.Handler {
		return images.Handlers(progressHandler, h)
	}); err != nil {
		// Try fallback to http.
		// This is needed because the containerd pusher will only attempt the first registry config we pass, which would
		// typically be https.
		// If there are no http-only host configs found we'll error out anyway.
		resolver, _ := pm.newResolver(ctx, statusTracker, authConfig, metaHeader, true)
		if resolver != nil {
			pusher, _ := resolver.Pusher(ctx, ref.String())
			if pusher != nil {
				log.G(ctx).WithField("ref", ref).Debug("Re-attempting push with http-fallback")
				err2 := remotes.PushContent(ctx, pusher, desc, pm.blobStore, nil, nil, func(h images.Handler) images.Handler {
					return images.Handlers(progressHandler, h)
				})
				if err2 == nil {
					err = nil
				} else {
					log.G(ctx).WithError(err2).WithField("ref", ref).Debug("Error while attempting push with http-fallback")
				}
			}
		}
		if err != nil {
			return errors.Wrap(err, "error pushing plugin")
		}
	}

	// For blobs that already exist in the registry we need to make sure to update the progress, otherwise it will just say "pending".
	// TODO: How to check if the layer already exists? Is it worth it?
	for _, j := range pj.jobs {
		progress.Update(out, pj.names[j], "Upload complete")
	}

	// Signal the client for content trust verification
	progress.Aux(out, types.PushResult{Tag: ref.(reference.Tagged).Tag(), Digest: desc.Digest.String(), Size: int(desc.Size)})

	return nil
}

// manifest wraps an OCI manifest because, historically, the registry does not
// accept plugins unless the manifest's media type is specifically
// schema2.MediaTypeManifest, so the OCI manifest media type cannot be used.
// Additionally, the docker schema2 manifest is validated to have a mediaType
// set on the manifest itself (not just on the descriptor), and the OCI types
// do not have that field.
type manifest struct {
	ocispec.Manifest
	MediaType string `json:"mediaType,omitempty"`
}
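
// buildManifest assembles a schema2-style manifest for a plugin from its config
// digest and layer digests, reading blob sizes from the given content manager.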
func buildManifest(ctx context.Context, s content.Manager, config digest.Digest, layers []digest.Digest) (manifest, error) {
	var m manifest
	m.MediaType = images.MediaTypeDockerSchema2Manifest
	m.SchemaVersion = 2

	configInfo, err := s.Info(ctx, config)
	if err != nil {
		return m, errors.Wrapf(err, "error reading plugin config content for digest %s", config)
	}
	m.Config = ocispec.Descriptor{
		MediaType: mediaTypePluginConfig,
		Size:      configInfo.Size,
		Digest:    configInfo.Digest,
	}

	for _, l := range layers {
		info, err := s.Info(ctx, l)
		if err != nil {
			return m, errors.Wrapf(err, "error fetching info for content digest %s", l)
		}
		m.Layers = append(m.Layers, ocispec.Descriptor{
			MediaType: images.MediaTypeDockerSchema2LayerGzip, // TODO: This is assuming everything is a gzip compressed layer, but that may not be true.
			Digest:    l,
			Size:      info.Size,
		})
	}
	return m, nil
}

// getManifestDescriptor gets the OCI descriptor for a manifest
// It will generate a manifest if one does not exist
func (pm *Manager) getManifestDescriptor(ctx context.Context, p *v2.Plugin) (ocispec.Descriptor, error) {
	logger := log.G(ctx).WithField("plugin", p.Name()).WithField("digest", p.Manifest)
	if p.Manifest != "" {
		info, err := pm.blobStore.Info(ctx, p.Manifest)
		if err == nil {
			desc := ocispec.Descriptor{
				Size:      info.Size,
				Digest:    info.Digest,
				MediaType: images.MediaTypeDockerSchema2Manifest,
			}
			return desc, nil
		}
		logger.WithError(err).Debug("Could not find plugin manifest in content store")
	} else {
		logger.Info("Plugin does not have manifest digest")
	}
	logger.Info("Building a new plugin manifest")

	manifest, err := buildManifest(ctx, pm.blobStore, p.Config, p.Blobsums)
	if err != nil {
		return ocispec.Descriptor{}, err
	}

	desc, err := writeManifest(ctx, pm.blobStore, &manifest)
	if err != nil {
		return desc, err
	}

	if err := pm.save(p); err != nil {
		logger.WithError(err).Error("Could not save plugin with manifest digest")
	}
	return desc, nil
}
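
// writeManifest marshals the manifest, writes it as a blob to the content
// store, and returns the descriptor pointing at it.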
func writeManifest(ctx context.Context, cs content.Store, m *manifest) (ocispec.Descriptor, error) {
	platform := platforms.DefaultSpec()
	desc := ocispec.Descriptor{
		MediaType: images.MediaTypeDockerSchema2Manifest,
		Platform:  &platform,
	}
	data, err := json.Marshal(m)
	if err != nil {
		return desc, errors.Wrap(err, "error encoding manifest")
	}
	desc.Digest = digest.FromBytes(data)
	desc.Size = int64(len(data))

	if err := content.WriteBlob(ctx, cs, remotes.MakeRefKey(ctx, desc), bytes.NewReader(data), desc); err != nil {
		return desc, errors.Wrap(err, "error writing plugin manifest")
	}
	return desc, nil
}

// Remove deletes the plugin's root directory.
func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	pm.mu.RLock()
	c := pm.cMap[p]
	pm.mu.RUnlock()

	if err != nil {
		return err
	}

	if !config.ForceRemove {
		if p.GetRefCount() > 0 {
			return inUseError(p.Name())
		}
		if p.IsEnabled() {
			return enabledError(p.Name())
		}
	}

	if p.IsEnabled() {
		if err := pm.disable(p, c); err != nil {
			log.G(context.TODO()).Errorf("failed to disable plugin '%s': %s", p.Name(), err)
		}
	}

	defer func() {
		go pm.GC()
	}()

	id := p.GetID()
	pluginDir := filepath.Join(pm.config.Root, id)

	if err := mount.RecursiveUnmount(pluginDir); err != nil {
		return errors.Wrap(err, "error unmounting plugin data")
	}

	if err := atomicRemoveAll(pluginDir); err != nil {
		return err
	}

	pm.config.Store.Remove(p)
	pm.config.LogPluginEvent(id, name, events.ActionRemove)
	pm.publisher.Publish(EventRemove{Plugin: p.PluginObj})
	return nil
}

// Set sets plugin args
func (pm *Manager) Set(name string, args []string) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}
	if err := p.Set(args); err != nil {
		return err
	}
	return pm.save(p)
}

// CreateFromContext creates a plugin from the given build context, which
// contains both the rootfs and the config.json, and a repo name with an
// optional tag.
func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) {
	pm.muGC.RLock()
	defer pm.muGC.RUnlock()

	ref, err := reference.ParseNormalizedNamed(options.RepoName)
	if err != nil {
		return errors.Wrapf(err, "failed to parse reference %v", options.RepoName)
	}
	if _, ok := ref.(reference.Canonical); ok {
		return errors.Errorf("canonical references are not permitted")
	}
	name := reference.FamiliarString(reference.TagNameOnly(ref))

	if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin()
		return err
	}

	tmpRootFSDir, err := os.MkdirTemp(pm.tmpDir(), ".rootfs")
	if err != nil {
		return errors.Wrap(err, "failed to create temp directory")
	}
	defer os.RemoveAll(tmpRootFSDir)

	var configJSON []byte
	rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON)

	rootFSBlob, err := pm.blobStore.Writer(ctx, content.WithRef(name))
	if err != nil {
		return err
	}
	defer rootFSBlob.Close()
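
	// Untar the rootfs into the temp dir while teeing the stream through gzip
	// into the content store, so a single pass produces both the on-disk rootfs
	// and the compressed layer blob.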
	gzw := gzip.NewWriter(rootFSBlob)
	rootFSReader := io.TeeReader(rootFS, gzw)

	if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil {
		return err
	}
	if err := rootFS.Close(); err != nil {
		return err
	}

	if configJSON == nil {
		return errors.New("config not found")
	}

	if err := gzw.Close(); err != nil {
		return errors.Wrap(err, "error closing gzip writer")
	}

	var config types.PluginConfig
	if err := json.Unmarshal(configJSON, &config); err != nil {
		return errors.Wrap(err, "failed to parse config")
	}

	if err := pm.validateConfig(config); err != nil {
		return err
	}

	pm.mu.Lock()
	defer pm.mu.Unlock()

	if err := rootFSBlob.Commit(ctx, 0, ""); err != nil {
		return err
	}
	defer func() {
		if err != nil {
			go pm.GC()
		}
	}()

	config.Rootfs = &types.PluginConfigRootfs{
		Type:    "layers",
		DiffIds: []string{rootFSBlob.Digest().String()},
	}

	config.DockerVersion = dockerversion.Version

	configBlob, err := pm.blobStore.Writer(ctx, content.WithRef(name+"-config.json"))
	if err != nil {
		return err
	}
	defer configBlob.Close()
	if err := json.NewEncoder(configBlob).Encode(config); err != nil {
		return errors.Wrap(err, "error encoding json config")
	}
	if err := configBlob.Commit(ctx, 0, ""); err != nil {
		return err
	}

	configDigest := configBlob.Digest()
	layers := []digest.Digest{rootFSBlob.Digest()}

	manifest, err := buildManifest(ctx, pm.blobStore, configDigest, layers)
	if err != nil {
		return err
	}
	desc, err := writeManifest(ctx, pm.blobStore, &manifest)
	if err != nil {
		return
	}

	p, err := pm.createPlugin(name, configDigest, desc.Digest, layers, tmpRootFSDir, nil)
	if err != nil {
		return err
	}
	p.PluginObj.PluginReference = name

	pm.publisher.Publish(EventCreate{Plugin: p.PluginObj})
	pm.config.LogPluginEvent(p.PluginObj.ID, name, events.ActionCreate)

	return nil
}
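
// validateConfig is currently a no-op stub; plugin config validation has not
// been implemented yet.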
func (pm *Manager) validateConfig(config types.PluginConfig) error {
	return nil // TODO:
}
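
// splitConfigRootFSFromTar reads the plugin build context from in, capturing
// the configFileName entry into config and returning a reader that streams only
// the entries under rootFSFileName (re-rooted at the top of the archive); all
// other entries are discarded.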
func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		tarReader := tar.NewReader(in)
		tarWriter := tar.NewWriter(pw)
		defer in.Close()

		hasRootFS := false

		for {
			hdr, err := tarReader.Next()
			if err == io.EOF {
				if !hasRootFS {
					pw.CloseWithError(errors.Wrap(err, "no rootfs found"))
					return
				}
				// Signals end of archive.
				tarWriter.Close()
				pw.Close()
				return
			}
			if err != nil {
				pw.CloseWithError(errors.Wrap(err, "failed to read from tar"))
				return
			}

			content := io.Reader(tarReader)
			name := path.Clean(hdr.Name)
			if path.IsAbs(name) {
				name = name[1:]
			}
			if name == configFileName {
				dt, err := io.ReadAll(content)
				if err != nil {
					pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName))
					return
				}
				*config = dt
			}
			if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName {
				hdr.Name = path.Clean(path.Join(parts[1:]...))
				if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") {
					hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:]
				}
				if err := tarWriter.WriteHeader(hdr); err != nil {
					pw.CloseWithError(errors.Wrap(err, "error writing tar header"))
					return
				}
				if _, err := pools.Copy(tarWriter, content); err != nil {
					pw.CloseWithError(errors.Wrap(err, "error copying tar data"))
					return
				}
				hasRootFS = true
			} else {
				io.Copy(io.Discard, content)
			}
		}
	}()
	return pr
}
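
// atomicRemoveAll renames dir to dir+"-removing" before deleting it, so that an
// interrupted removal leaves a marker that a later attempt can detect and clean
// up instead of leaving a partially removed plugin directory in place.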
func atomicRemoveAll(dir string) error {
	renamed := dir + "-removing"

	err := os.Rename(dir, renamed)
	switch {
	case os.IsNotExist(err), err == nil:
		// even if `dir` doesn't exist, we can still try and remove `renamed`
	case os.IsExist(err):
		// Some previous remove failed, check if the origin dir exists
		if e := containerfs.EnsureRemoveAll(renamed); e != nil {
			return errors.Wrap(err, "rename target already exists and could not be removed")
		}
		if _, err := os.Stat(dir); os.IsNotExist(err) {
			// origin doesn't exist, nothing left to do
			return nil
		}

		// attempt to rename again
		if err := os.Rename(dir, renamed); err != nil {
			return errors.Wrap(err, "failed to rename dir for atomic removal")
		}
	default:
		return errors.Wrap(err, "failed to rename dir for atomic removal")
	}

	if err := containerfs.EnsureRemoveAll(renamed); err != nil {
		os.Rename(renamed, dir)
		return err
	}
	return nil
}