daemon: read-copy-update the daemon config
Ensure data-race-free access to the daemon configuration without locking by mutating a deep copy of the config and atomically storing a pointer to the copy into the daemon-wide configStore value. Any operations which need to read from the daemon config must capture the configStore value only once and pass it around to guarantee a consistent view of the config.

Signed-off-by: Cory Snider <csnider@mirantis.com>
parent 742ac6e275
commit 0b592467d9
62 changed files with 1819 additions and 568 deletions
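
For illustration (not part of this patch): a minimal, self-contained sketch of the read-copy-update pattern the message describes, assuming illustrative Config fields and a hypothetical deepCopy helper; the daemon's real types and reload path live in the files changed below.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// Config stands in for the daemon configuration (illustrative fields only).
type Config struct {
	Debug  bool
	Labels []string
}

type Daemon struct {
	configStore  atomic.Pointer[Config] // readers Load a snapshot; no locking
	configReload sync.Mutex             // serializes writers only
}

// config returns the current immutable snapshot; callers must not mutate it.
func (d *Daemon) config() *Config {
	if cfg := d.configStore.Load(); cfg != nil {
		return cfg
	}
	return &Config{}
}

// reload mutates a deep copy of the current config and atomically publishes
// the copy. Readers that already captured the old pointer keep a consistent view.
func (d *Daemon) reload(mutate func(*Config)) {
	d.configReload.Lock()
	defer d.configReload.Unlock()
	next := d.config().deepCopy()
	mutate(next)
	d.configStore.Store(next)
}

// deepCopy is a hypothetical helper: copy every field, including slices and maps.
func (c *Config) deepCopy() *Config {
	cp := *c
	cp.Labels = append([]string(nil), c.Labels...)
	return &cp
}

func main() {
	d := &Daemon{}
	d.configStore.Store(&Config{Labels: []string{"env=dev"}})

	cfg := d.config() // capture once per operation, then pass cfg around
	d.reload(func(c *Config) { c.Debug = true })
	fmt.Println(cfg.Debug, d.config().Debug) // false true: the captured snapshot is unchanged
}
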
@@ -268,7 +268,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 		// Restart all autostart containers which has a swarm endpoint
 		// and is not yet running now that we have successfully
 		// initialized the cluster.
-		d.RestartSwarmContainers()
+		d.RestartSwarmContainers(cli.Config)
 
 		logrus.Info("Daemon has completed initialization")
 
@@ -371,7 +371,7 @@ func newRouterOptions(ctx context.Context, config *config.Config, d *daemon.Daem
 		DefaultCgroupParent: cgroupParent,
 		RegistryHosts: d.RegistryHosts,
 		BuilderConfig: config.Builder,
-		Rootless: d.Rootless(),
+		Rootless: daemon.Rootless(config),
 		IdentityMapping: d.IdentityMapping(),
 		DNSConfig: config.DNSConfig,
 		ApparmorProfile: daemon.DefaultApparmorProfile(),
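
For illustration (not part of this patch): d.Rootless() becoming daemon.Rootless(config) above is one instance of a pattern used throughout this commit, where Daemon methods that only read the config turn into free functions over a *config.Config snapshot (getSysInfo, cgroupDriver and supportsRecursivelyReadOnly below follow suit). A rough sketch with stand-in types; the original method body is assumed:

package main

import "fmt"

// Config stands in for daemon/config.Config.
type Config struct{ Rootless bool }

// Daemon shows the pre-change shape, where the method read a mutable field.
type Daemon struct{ configStore *Config }

// Before (assumed body): the method reaches into the daemon's configStore.
func (d *Daemon) Rootless() bool { return d.configStore.Rootless }

// After: a free function over whichever snapshot the caller captured, so every
// check within one operation sees the same configuration.
func Rootless(cfg *Config) bool { return cfg.Rootless }

func main() {
	cfg := &Config{Rootless: true}
	fmt.Println(Rootless(cfg))
}
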
@@ -9,7 +9,6 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
-	"sync"
 
 	"golang.org/x/text/encoding"
 	"golang.org/x/text/encoding/unicode"
@@ -227,7 +226,6 @@ type CommonConfig struct {
 	NetworkConfig
 	registry.ServiceOptions
 
-	sync.Mutex
 	// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
 	// It should probably be handled outside this package.
 	ValuesSet map[string]interface{} `json:"-"`
@@ -650,11 +648,11 @@ func Validate(config *Config) error {
 		return err
 	}
 
-	if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
-		if !builtinRuntimes[defaultRuntime] {
+	if config.DefaultRuntime != "" {
+		if !builtinRuntimes[config.DefaultRuntime] {
 			runtimes := config.GetAllRuntimes()
-			if _, ok := runtimes[defaultRuntime]; !ok && !IsPermissibleC8dRuntimeName(defaultRuntime) {
-				return errors.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
+			if _, ok := runtimes[config.DefaultRuntime]; !ok && !IsPermissibleC8dRuntimeName(config.DefaultRuntime) {
+				return fmt.Errorf("specified default runtime '%s' does not exist", config.DefaultRuntime)
			}
 		}
 	}
@@ -669,15 +667,6 @@ func Validate(config *Config) error {
 	return config.ValidatePlatformConfig()
 }
 
-// GetDefaultRuntimeName returns the current default runtime
-func (conf *Config) GetDefaultRuntimeName() string {
-	conf.Lock()
-	rt := conf.DefaultRuntime
-	conf.Unlock()
-
-	return rt
-}
-
 // MaskCredentials masks credentials that are in an URL.
 func MaskCredentials(rawURL string) string {
 	parsedURL, err := url.Parse(rawURL)
@@ -84,8 +84,6 @@ type Config struct {
 // GetRuntime returns the runtime path and arguments for a given
 // runtime name
 func (conf *Config) GetRuntime(name string) *types.Runtime {
-	conf.Lock()
-	defer conf.Unlock()
 	if rt, ok := conf.Runtimes[name]; ok {
 		return &rt
 	}
@@ -94,10 +92,7 @@ func (conf *Config) GetRuntime(name string) *types.Runtime {
 
 // GetAllRuntimes returns a copy of the runtimes map
 func (conf *Config) GetAllRuntimes() map[string]types.Runtime {
-	conf.Lock()
-	rts := conf.Runtimes
-	conf.Unlock()
-	return rts
+	return conf.Runtimes
 }
 
 // GetExecRoot returns the user configured Exec-root
@@ -107,8 +102,6 @@ func (conf *Config) GetExecRoot() string {
 
 // GetInitPath returns the configured docker-init path
 func (conf *Config) GetInitPath() string {
-	conf.Lock()
-	defer conf.Unlock()
 	if conf.InitPath != "" {
 		return conf.InitPath
 	}
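
For illustration (not part of this patch): once a published Config is treated as immutable, accessors such as GetAllRuntimes can hand out interior maps without locking or copying, and the burden shifts to callers to treat them as read-only. A small sketch of that caller-side contract, with stand-in types:

package main

import "fmt"

type Runtime struct{ Path string }

type Config struct {
	Runtimes map[string]Runtime
}

// GetAllRuntimes returns the live map from the snapshot; treat it as read-only.
func (c *Config) GetAllRuntimes() map[string]Runtime {
	return c.Runtimes
}

func main() {
	cfg := &Config{Runtimes: map[string]Runtime{"runc": {Path: "/usr/bin/runc"}}}

	// Reading is safe without a lock: nobody mutates a published snapshot.
	for name, rt := range cfg.GetAllRuntimes() {
		fmt.Println(name, rt.Path)
	}

	// A caller that needs its own mutable view must copy first.
	own := make(map[string]Runtime, len(cfg.Runtimes))
	for k, v := range cfg.GetAllRuntimes() {
		own[k] = v
	}
	own["custom"] = Runtime{Path: "/usr/local/bin/my-runtime"}
}
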
@ -10,6 +10,7 @@ import (
|
|||
containertypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/daemon/network"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/image"
|
||||
|
@ -206,10 +207,10 @@ func (daemon *Daemon) generateHostname(id string, config *containertypes.Config)
|
|||
}
|
||||
}
|
||||
|
||||
func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error {
|
||||
func (daemon *Daemon) setSecurityOptions(cfg *config.Config, container *container.Container, hostConfig *containertypes.HostConfig) error {
|
||||
container.Lock()
|
||||
defer container.Unlock()
|
||||
return daemon.parseSecurityOpt(&container.SecurityOptions, hostConfig)
|
||||
return daemon.parseSecurityOpt(cfg, &container.SecurityOptions, hostConfig)
|
||||
}
|
||||
|
||||
func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error {
|
||||
|
@ -234,7 +235,7 @@ func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *
|
|||
|
||||
// verifyContainerSettings performs validation of the hostconfig and config
|
||||
// structures.
|
||||
func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) (warnings []string, err error) {
|
||||
func (daemon *Daemon) verifyContainerSettings(daemonCfg *config.Config, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) (warnings []string, err error) {
|
||||
// First perform verification of settings common across all platforms.
|
||||
if err = validateContainerConfig(config); err != nil {
|
||||
return warnings, err
|
||||
|
@ -244,7 +245,7 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostCon
|
|||
}
|
||||
|
||||
// Now do platform-specific verification
|
||||
warnings, err = verifyPlatformContainerSettings(daemon, hostConfig, update)
|
||||
warnings, err = verifyPlatformContainerSettings(daemon, daemonCfg, hostConfig, update)
|
||||
for _, w := range warnings {
|
||||
logrus.Warn(w)
|
||||
}
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
containertypes "github.com/docker/docker/api/types/container"
|
||||
networktypes "github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/daemon/network"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/libnetwork"
|
||||
|
@ -26,19 +27,19 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func (daemon *Daemon) getDNSSearchSettings(container *container.Container) []string {
|
||||
func (daemon *Daemon) getDNSSearchSettings(cfg *config.Config, container *container.Container) []string {
|
||||
if len(container.HostConfig.DNSSearch) > 0 {
|
||||
return container.HostConfig.DNSSearch
|
||||
}
|
||||
|
||||
if len(daemon.configStore.DNSSearch) > 0 {
|
||||
return daemon.configStore.DNSSearch
|
||||
if len(cfg.DNSSearch) > 0 {
|
||||
return cfg.DNSSearch
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]libnetwork.SandboxOption, error) {
|
||||
func (daemon *Daemon) buildSandboxOptions(cfg *config.Config, container *container.Container) ([]libnetwork.SandboxOption, error) {
|
||||
var (
|
||||
sboxOptions []libnetwork.SandboxOption
|
||||
err error
|
||||
|
@ -61,21 +62,21 @@ func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]lib
|
|||
sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey())
|
||||
}
|
||||
|
||||
if err = daemon.setupPathsAndSandboxOptions(container, &sboxOptions); err != nil {
|
||||
if err = daemon.setupPathsAndSandboxOptions(container, cfg, &sboxOptions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(container.HostConfig.DNS) > 0 {
|
||||
dns = container.HostConfig.DNS
|
||||
} else if len(daemon.configStore.DNS) > 0 {
|
||||
dns = daemon.configStore.DNS
|
||||
} else if len(cfg.DNS) > 0 {
|
||||
dns = cfg.DNS
|
||||
}
|
||||
|
||||
for _, d := range dns {
|
||||
sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d))
|
||||
}
|
||||
|
||||
dnsSearch := daemon.getDNSSearchSettings(container)
|
||||
dnsSearch := daemon.getDNSSearchSettings(cfg, container)
|
||||
|
||||
for _, ds := range dnsSearch {
|
||||
sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds))
|
||||
|
@ -83,8 +84,8 @@ func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]lib
|
|||
|
||||
if len(container.HostConfig.DNSOptions) > 0 {
|
||||
dnsOptions = container.HostConfig.DNSOptions
|
||||
} else if len(daemon.configStore.DNSOptions) > 0 {
|
||||
dnsOptions = daemon.configStore.DNSOptions
|
||||
} else if len(cfg.DNSOptions) > 0 {
|
||||
dnsOptions = cfg.DNSOptions
|
||||
}
|
||||
|
||||
for _, ds := range dnsOptions {
|
||||
|
@ -112,7 +113,7 @@ func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]lib
|
|||
// value with the IP address stored in the daemon level HostGatewayIP
|
||||
// config variable
|
||||
if ip == opts.HostGatewayName {
|
||||
gateway := daemon.configStore.HostGatewayIP.String()
|
||||
gateway := cfg.HostGatewayIP.String()
|
||||
if gateway == "" {
|
||||
return nil, fmt.Errorf("unable to derive the IP value for host-gateway")
|
||||
}
|
||||
|
@ -218,7 +219,7 @@ func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]lib
|
|||
}
|
||||
|
||||
for alias, parent := range daemon.parents(container) {
|
||||
if daemon.configStore.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() {
|
||||
if cfg.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() {
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -291,13 +292,13 @@ func (daemon *Daemon) updateNetworkSettings(container *container.Container, n li
|
|||
return nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep *libnetwork.Endpoint) error {
|
||||
func (daemon *Daemon) updateEndpointNetworkSettings(cfg *config.Config, container *container.Container, n libnetwork.Network, ep *libnetwork.Endpoint) error {
|
||||
if err := buildEndpointInfo(container.NetworkSettings, n, ep); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if container.HostConfig.NetworkMode == runconfig.DefaultDaemonNetworkMode() {
|
||||
container.NetworkSettings.Bridge = daemon.configStore.BridgeConfig.Iface
|
||||
container.NetworkSettings.Bridge = cfg.BridgeConfig.Iface
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -305,7 +306,7 @@ func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Contain
|
|||
|
||||
// UpdateNetwork is used to update the container's network (e.g. when linked containers
|
||||
// get removed/unlinked).
|
||||
func (daemon *Daemon) updateNetwork(container *container.Container) error {
|
||||
func (daemon *Daemon) updateNetwork(cfg *config.Config, container *container.Container) error {
|
||||
var (
|
||||
start = time.Now()
|
||||
ctrl = daemon.netController
|
||||
|
@ -335,7 +336,7 @@ func (daemon *Daemon) updateNetwork(container *container.Container) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
sbOptions, err := daemon.buildSandboxOptions(container)
|
||||
sbOptions, err := daemon.buildSandboxOptions(cfg, container)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Update network failed: %v", err)
|
||||
}
|
||||
|
@ -519,7 +520,7 @@ func (daemon *Daemon) updateContainerNetworkSettings(container *container.Contai
|
|||
}
|
||||
}
|
||||
|
||||
func (daemon *Daemon) allocateNetwork(container *container.Container) (retErr error) {
|
||||
func (daemon *Daemon) allocateNetwork(cfg *config.Config, container *container.Container) (retErr error) {
|
||||
if daemon.netController == nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -552,7 +553,7 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) (retErr er
|
|||
defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName()
|
||||
if nConf, ok := container.NetworkSettings.Networks[defaultNetName]; ok {
|
||||
cleanOperationalData(nConf)
|
||||
if err := daemon.connectToNetwork(container, defaultNetName, nConf.EndpointSettings, updateSettings); err != nil {
|
||||
if err := daemon.connectToNetwork(cfg, container, defaultNetName, nConf.EndpointSettings, updateSettings); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -569,7 +570,7 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) (retErr er
|
|||
|
||||
for netName, epConf := range networks {
|
||||
cleanOperationalData(epConf)
|
||||
if err := daemon.connectToNetwork(container, netName, epConf.EndpointSettings, updateSettings); err != nil {
|
||||
if err := daemon.connectToNetwork(cfg, container, netName, epConf.EndpointSettings, updateSettings); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -578,7 +579,7 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) (retErr er
|
|||
// create its network sandbox now if not present
|
||||
if len(networks) == 0 {
|
||||
if nil == daemon.getNetworkSandbox(container) {
|
||||
sbOptions, err := daemon.buildSandboxOptions(container)
|
||||
sbOptions, err := daemon.buildSandboxOptions(cfg, container)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -722,13 +723,13 @@ func (daemon *Daemon) updateNetworkConfig(container *container.Container, n libn
|
|||
return nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) {
|
||||
func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) {
|
||||
start := time.Now()
|
||||
if container.HostConfig.NetworkMode.IsContainer() {
|
||||
return runconfig.ErrConflictSharedNetwork
|
||||
}
|
||||
if containertypes.NetworkMode(idOrName).IsBridge() &&
|
||||
daemon.configStore.DisableBridge {
|
||||
cfg.DisableBridge {
|
||||
container.Config.NetworkDisabled = true
|
||||
return nil
|
||||
}
|
||||
|
@ -766,7 +767,7 @@ func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName
|
|||
|
||||
controller := daemon.netController
|
||||
sb := daemon.getNetworkSandbox(container)
|
||||
createOptions, err := buildCreateEndpointOptions(container, n, endpointConfig, sb, daemon.configStore.DNS)
|
||||
createOptions, err := buildCreateEndpointOptions(container, n, endpointConfig, sb, cfg.DNS)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -790,12 +791,12 @@ func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName
|
|||
|
||||
delete(container.NetworkSettings.Networks, n.ID())
|
||||
|
||||
if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil {
|
||||
if err := daemon.updateEndpointNetworkSettings(cfg, container, n, ep); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sb == nil {
|
||||
sbOptions, err := daemon.buildSandboxOptions(container)
|
||||
sbOptions, err := daemon.buildSandboxOptions(cfg, container)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -946,7 +947,7 @@ func (daemon *Daemon) tryDetachContainerFromClusterNetwork(network libnetwork.Ne
|
|||
daemon.LogNetworkEventWithAttributes(network, "disconnect", attributes)
|
||||
}
|
||||
|
||||
func (daemon *Daemon) initializeNetworking(container *container.Container) error {
|
||||
func (daemon *Daemon) initializeNetworking(cfg *config.Config, container *container.Container) error {
|
||||
var err error
|
||||
|
||||
if container.HostConfig.NetworkMode.IsContainer() {
|
||||
|
@ -975,7 +976,7 @@ func (daemon *Daemon) initializeNetworking(container *container.Container) error
|
|||
}
|
||||
}
|
||||
|
||||
if err := daemon.allocateNetwork(container); err != nil {
|
||||
if err := daemon.allocateNetwork(cfg, container); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -1074,7 +1075,7 @@ func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName
|
|||
}
|
||||
}
|
||||
} else {
|
||||
if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil {
|
||||
if err := daemon.connectToNetwork(daemon.config(), container, idOrName, endpointConfig, true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"syscall"
|
||||
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/daemon/links"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/libnetwork"
|
||||
|
@ -380,7 +381,7 @@ func serviceDiscoveryOnDefaultNetwork() bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func (daemon *Daemon) setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error {
|
||||
func (daemon *Daemon) setupPathsAndSandboxOptions(container *container.Container, cfg *config.Config, sboxOptions *[]libnetwork.SandboxOption) error {
|
||||
var err error
|
||||
|
||||
// Set the correct paths for /etc/hosts and /etc/resolv.conf, based on the
|
||||
|
@ -427,7 +428,7 @@ func (daemon *Daemon) setupPathsAndSandboxOptions(container *container.Container
|
|||
// Copy the host's resolv.conf for the container (/run/systemd/resolve/resolv.conf or /etc/resolv.conf)
|
||||
*sboxOptions = append(
|
||||
*sboxOptions,
|
||||
libnetwork.OptionOriginResolvConfPath(daemon.configStore.GetResolvConf()),
|
||||
libnetwork.OptionOriginResolvConfPath(cfg.GetResolvConf()),
|
||||
)
|
||||
}
|
||||
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"os"
|
||||
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/libnetwork"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -161,7 +162,7 @@ func serviceDiscoveryOnDefaultNetwork() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (daemon *Daemon) setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error {
|
||||
func (daemon *Daemon) setupPathsAndSandboxOptions(container *container.Container, cfg *config.Config, sboxOptions *[]libnetwork.SandboxOption) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@@ -33,8 +33,9 @@ func TestContainerWarningHostAndPublishPorts(t *testing.T) {
 		}
 		cs := &config.Config{}
 		configureRuntimes(cs)
-		d := &Daemon{configStore: cs}
-		wrns, err := d.verifyContainerSettings(hostConfig, &containertypes.Config{}, false)
+		d := &Daemon{}
+		d.configStore.Store(cs)
+		wrns, err := d.verifyContainerSettings(cs, hostConfig, &containertypes.Config{}, false)
 		assert.NilError(t, err)
 		assert.DeepEqual(t, tc.warnings, wrns)
 	}
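
For illustration (not part of this patch): the test above can build &Daemon{} and immediately Store the config because the zero value of sync/atomic's Pointer type is ready to use, and Load on a never-stored pointer returns nil, which is what the nil check in the config() accessor added to daemon/daemon.go below is for. A tiny self-contained demonstration:

package main

import (
	"fmt"
	"sync/atomic"
)

type Config struct{ Debug bool }

func main() {
	var p atomic.Pointer[Config] // the zero value is usable without initialization

	fmt.Println(p.Load() == nil) // true: nothing has been stored yet

	p.Store(&Config{Debug: true})
	fmt.Println(p.Load().Debug) // true
}
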
@ -14,6 +14,7 @@ import (
|
|||
imagetypes "github.com/docker/docker/api/types/image"
|
||||
networktypes "github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/daemon/images"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/image"
|
||||
|
@ -34,7 +35,7 @@ type createOpts struct {
|
|||
|
||||
// CreateManagedContainer creates a container that is managed by a Service
|
||||
func (daemon *Daemon) CreateManagedContainer(ctx context.Context, params types.ContainerCreateConfig) (containertypes.CreateResponse, error) {
|
||||
return daemon.containerCreate(ctx, createOpts{
|
||||
return daemon.containerCreate(ctx, daemon.config(), createOpts{
|
||||
params: params,
|
||||
managed: true,
|
||||
})
|
||||
|
@ -42,7 +43,7 @@ func (daemon *Daemon) CreateManagedContainer(ctx context.Context, params types.C
|
|||
|
||||
// ContainerCreate creates a regular container
|
||||
func (daemon *Daemon) ContainerCreate(ctx context.Context, params types.ContainerCreateConfig) (containertypes.CreateResponse, error) {
|
||||
return daemon.containerCreate(ctx, createOpts{
|
||||
return daemon.containerCreate(ctx, daemon.config(), createOpts{
|
||||
params: params,
|
||||
})
|
||||
}
|
||||
|
@ -50,19 +51,19 @@ func (daemon *Daemon) ContainerCreate(ctx context.Context, params types.Containe
|
|||
// ContainerCreateIgnoreImagesArgsEscaped creates a regular container. This is called from the builder RUN case
|
||||
// and ensures that we do not take the images ArgsEscaped
|
||||
func (daemon *Daemon) ContainerCreateIgnoreImagesArgsEscaped(ctx context.Context, params types.ContainerCreateConfig) (containertypes.CreateResponse, error) {
|
||||
return daemon.containerCreate(ctx, createOpts{
|
||||
return daemon.containerCreate(ctx, daemon.config(), createOpts{
|
||||
params: params,
|
||||
ignoreImagesArgsEscaped: true,
|
||||
})
|
||||
}
|
||||
|
||||
func (daemon *Daemon) containerCreate(ctx context.Context, opts createOpts) (containertypes.CreateResponse, error) {
|
||||
func (daemon *Daemon) containerCreate(ctx context.Context, daemonCfg *config.Config, opts createOpts) (containertypes.CreateResponse, error) {
|
||||
start := time.Now()
|
||||
if opts.params.Config == nil {
|
||||
return containertypes.CreateResponse{}, errdefs.InvalidParameter(errors.New("Config cannot be empty in order to create a container"))
|
||||
}
|
||||
|
||||
warnings, err := daemon.verifyContainerSettings(opts.params.HostConfig, opts.params.Config, false)
|
||||
warnings, err := daemon.verifyContainerSettings(daemonCfg, opts.params.HostConfig, opts.params.Config, false)
|
||||
if err != nil {
|
||||
return containertypes.CreateResponse{Warnings: warnings}, errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
@ -94,12 +95,12 @@ func (daemon *Daemon) containerCreate(ctx context.Context, opts createOpts) (con
|
|||
if opts.params.HostConfig == nil {
|
||||
opts.params.HostConfig = &containertypes.HostConfig{}
|
||||
}
|
||||
err = daemon.adaptContainerSettings(opts.params.HostConfig, opts.params.AdjustCPUShares)
|
||||
err = daemon.adaptContainerSettings(daemonCfg, opts.params.HostConfig, opts.params.AdjustCPUShares)
|
||||
if err != nil {
|
||||
return containertypes.CreateResponse{Warnings: warnings}, errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
ctr, err := daemon.create(ctx, opts)
|
||||
ctr, err := daemon.create(ctx, daemonCfg, opts)
|
||||
if err != nil {
|
||||
return containertypes.CreateResponse{Warnings: warnings}, err
|
||||
}
|
||||
|
@ -113,7 +114,7 @@ func (daemon *Daemon) containerCreate(ctx context.Context, opts createOpts) (con
|
|||
}
|
||||
|
||||
// Create creates a new container from the given configuration with a given name.
|
||||
func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *container.Container, retErr error) {
|
||||
func (daemon *Daemon) create(ctx context.Context, daemonCfg *config.Config, opts createOpts) (retC *container.Container, retErr error) {
|
||||
var (
|
||||
ctr *container.Container
|
||||
img *image.Image
|
||||
|
@ -175,7 +176,7 @@ func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *contai
|
|||
}
|
||||
}()
|
||||
|
||||
if err := daemon.setSecurityOptions(ctr, opts.params.HostConfig); err != nil {
|
||||
if err := daemon.setSecurityOptions(daemonCfg, ctr, opts.params.HostConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
daemon/daemon.go (115 changes)
@@ -16,6 +16,7 @@ import (
 	"runtime"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/containerd/containerd"
@@ -84,7 +85,8 @@ type Daemon struct {
 	containersReplica *container.ViewDB
 	execCommands *container.ExecStore
 	imageService ImageService
-	configStore *config.Config
+	configStore atomic.Pointer[config.Config]
+	configReload sync.Mutex
 	statsCollector *stats.Collector
 	defaultLogConfig containertypes.LogConfig
 	registryService *registry.Service
@@ -148,20 +150,31 @@ func (daemon *Daemon) StoreHosts(hosts []string) {
 	}
 }
 
+// config returns an immutable snapshot of the current daemon configuration.
+// Multiple calls to this function will return the same pointer until the
+// configuration is reloaded so callers must take care not to modify the
+// returned value.
+//
+// To ensure that the configuration used remains consistent throughout the
+// lifetime of an operation, the configuration pointer should be passed down the
+// call stack, like one would a [context.Context] value. Only the entrypoints
+// for operations, the outermost functions, should call this function.
+func (daemon *Daemon) config() *config.Config {
+	cfg := daemon.configStore.Load()
+	if cfg == nil {
+		return &config.Config{}
+	}
+	return cfg
+}
+
 // HasExperimental returns whether the experimental features of the daemon are enabled or not
 func (daemon *Daemon) HasExperimental() bool {
-	return daemon.configStore != nil && daemon.configStore.Experimental
+	return daemon.config().Experimental
 }
 
 // Features returns the features map from configStore
 func (daemon *Daemon) Features() map[string]bool {
-	daemon.configStore.Lock()
-	defer daemon.configStore.Unlock()
-	f := make(map[string]bool, len(daemon.configStore.Features))
-	for k, v := range daemon.configStore.Features {
-		f[k] = v
-	}
-	return f
+	return daemon.config().Features
 }
 
 // UsesSnapshotter returns true if feature flag to use containerd snapshotter is enabled
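
For illustration (not part of this patch): the new doc comment compares the config pointer to a [context.Context] value. A rough sketch of the intended call shape, where the operation entrypoint captures the snapshot once and threads it through helpers; the signatures here are simplified stand-ins, not the daemon's real ones:

package main

import (
	"fmt"
	"sync/atomic"
)

type Config struct{ DNS []string }

type Daemon struct{ configStore atomic.Pointer[Config] }

func (d *Daemon) config() *Config {
	if cfg := d.configStore.Load(); cfg != nil {
		return cfg
	}
	return &Config{}
}

// ContainerCreate is an operation entrypoint: it captures the snapshot once...
func (d *Daemon) ContainerCreate(name string) error {
	cfg := d.config()
	return d.create(cfg, name)
}

// ...and every helper below it receives the same *Config instead of calling
// d.config() again, so a concurrent reload cannot change the view mid-operation.
func (d *Daemon) create(cfg *Config, name string) error {
	fmt.Println("creating", name, "with DNS", cfg.DNS)
	return nil
}

func main() {
	d := &Daemon{}
	d.configStore.Store(&Config{DNS: []string{"8.8.8.8"}})
	_ = d.ContainerCreate("web")
}
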
@ -172,17 +185,14 @@ func (daemon *Daemon) UsesSnapshotter() bool {
|
|||
// RegistryHosts returns the registry hosts configuration for the host component
|
||||
// of a distribution image reference.
|
||||
func (daemon *Daemon) RegistryHosts(host string) ([]docker.RegistryHost, error) {
|
||||
daemon.configStore.Lock()
|
||||
serviceOpts := daemon.configStore.ServiceOptions
|
||||
daemon.configStore.Unlock()
|
||||
|
||||
var (
|
||||
conf = daemon.config()
|
||||
registryKey = "docker.io"
|
||||
mirrors = make([]string, len(serviceOpts.Mirrors))
|
||||
mirrors = make([]string, len(conf.Mirrors))
|
||||
m = map[string]resolverconfig.RegistryConfig{}
|
||||
)
|
||||
// must trim "https://" or "http://" prefix
|
||||
for i, v := range serviceOpts.Mirrors {
|
||||
for i, v := range conf.Mirrors {
|
||||
if uri, err := url.Parse(v); err == nil {
|
||||
v = uri.Host
|
||||
}
|
||||
|
@ -191,7 +201,7 @@ func (daemon *Daemon) RegistryHosts(host string) ([]docker.RegistryHost, error)
|
|||
// set mirrors for default registry
|
||||
m[registryKey] = resolverconfig.RegistryConfig{Mirrors: mirrors}
|
||||
|
||||
for _, v := range serviceOpts.InsecureRegistries {
|
||||
for _, v := range conf.InsecureRegistries {
|
||||
u, err := url.Parse(v)
|
||||
if err != nil && !strings.HasPrefix(v, "http://") && !strings.HasPrefix(v, "https://") {
|
||||
originalErr := err
|
||||
|
@ -237,7 +247,7 @@ type layerAccessor interface {
|
|||
GetLayerByID(cid string) (layer.RWLayer, error)
|
||||
}
|
||||
|
||||
func (daemon *Daemon) restore() error {
|
||||
func (daemon *Daemon) restore(cfg *config.Config) error {
|
||||
var mapLock sync.Mutex
|
||||
containers := make(map[string]*container.Container)
|
||||
|
||||
|
@ -377,7 +387,7 @@ func (daemon *Daemon) restore() error {
|
|||
logger(c).WithError(err).Error("failed to delete task from containerd")
|
||||
return
|
||||
}
|
||||
} else if !daemon.configStore.LiveRestoreEnabled {
|
||||
} else if !cfg.LiveRestoreEnabled {
|
||||
logger(c).Debug("shutting down container considered alive by containerd")
|
||||
if err := daemon.shutdownContainer(c); err != nil && !errdefs.IsNotFound(err) {
|
||||
log.WithError(err).Error("error shutting down container")
|
||||
|
@ -457,7 +467,7 @@ func (daemon *Daemon) restore() error {
|
|||
|
||||
c.ResetRestartManager(false)
|
||||
if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
|
||||
options, err := daemon.buildSandboxOptions(c)
|
||||
options, err := daemon.buildSandboxOptions(cfg, c)
|
||||
if err != nil {
|
||||
logger(c).WithError(err).Warn("failed to build sandbox option to restore container")
|
||||
}
|
||||
|
@ -475,7 +485,7 @@ func (daemon *Daemon) restore() error {
|
|||
// not initialized yet. We will start
|
||||
// it after the cluster is
|
||||
// initialized.
|
||||
if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
|
||||
if cfg.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
|
||||
mapLock.Lock()
|
||||
restartContainers[c] = make(chan struct{})
|
||||
mapLock.Unlock()
|
||||
|
@ -513,7 +523,7 @@ func (daemon *Daemon) restore() error {
|
|||
//
|
||||
// Note that we cannot initialize the network controller earlier, as it
|
||||
// needs to know if there's active sandboxes (running containers).
|
||||
if err = daemon.initNetworkController(activeSandboxes); err != nil {
|
||||
if err = daemon.initNetworkController(cfg, activeSandboxes); err != nil {
|
||||
return fmt.Errorf("Error initializing network controller: %v", err)
|
||||
}
|
||||
|
||||
|
@ -560,7 +570,7 @@ func (daemon *Daemon) restore() error {
|
|||
if err := daemon.prepareMountPoints(c); err != nil {
|
||||
log.WithError(err).Error("failed to prepare mount points for container")
|
||||
}
|
||||
if err := daemon.containerStart(context.Background(), c, "", "", true); err != nil {
|
||||
if err := daemon.containerStart(context.Background(), cfg, c, "", "", true); err != nil {
|
||||
log.WithError(err).Error("failed to start container")
|
||||
}
|
||||
close(chNotify)
|
||||
|
@ -576,7 +586,7 @@ func (daemon *Daemon) restore() error {
|
|||
go func(cid string) {
|
||||
_ = sem.Acquire(context.Background(), 1)
|
||||
|
||||
if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
|
||||
if err := daemon.containerRm(cfg, cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
|
||||
logrus.WithField("container", cid).WithError(err).Error("failed to remove container")
|
||||
}
|
||||
|
||||
|
@ -624,7 +634,7 @@ func (daemon *Daemon) restore() error {
|
|||
|
||||
// RestartSwarmContainers restarts any autostart container which has a
|
||||
// swarm endpoint.
|
||||
func (daemon *Daemon) RestartSwarmContainers() {
|
||||
func (daemon *Daemon) RestartSwarmContainers(cfg *config.Config) {
|
||||
ctx := context.Background()
|
||||
|
||||
// parallelLimit is the maximum number of parallel startup jobs that we
|
||||
|
@ -642,7 +652,7 @@ func (daemon *Daemon) RestartSwarmContainers() {
|
|||
// Autostart all the containers which has a
|
||||
// swarm endpoint now that the cluster is
|
||||
// initialized.
|
||||
if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
|
||||
if cfg.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
|
||||
group.Add(1)
|
||||
go func(c *container.Container) {
|
||||
if err := sem.Acquire(ctx, 1); err != nil {
|
||||
|
@ -651,7 +661,7 @@ func (daemon *Daemon) RestartSwarmContainers() {
|
|||
return
|
||||
}
|
||||
|
||||
if err := daemon.containerStart(ctx, c, "", "", true); err != nil {
|
||||
if err := daemon.containerStart(ctx, cfg, c, "", "", true); err != nil {
|
||||
logrus.WithField("container", c.ID).WithError(err).Error("failed to start swarm container")
|
||||
}
|
||||
|
||||
|
@ -735,10 +745,7 @@ func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {
|
|||
// IsSwarmCompatible verifies if the current daemon
|
||||
// configuration is compatible with the swarm mode
|
||||
func (daemon *Daemon) IsSwarmCompatible() error {
|
||||
if daemon.configStore == nil {
|
||||
return nil
|
||||
}
|
||||
return daemon.configStore.IsSwarmCompatible()
|
||||
return daemon.config().IsSwarmCompatible()
|
||||
}
|
||||
|
||||
// NewDaemon sets up everything for the daemon to be able to service
|
||||
|
@@ -800,10 +807,10 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	}
 
 	d := &Daemon{
-		configStore: config,
 		PluginStore: pluginStore,
 		startupDone: make(chan struct{}),
 	}
+	d.configStore.Store(config)
 
 	// TEST_INTEGRATION_USE_SNAPSHOTTER is used for integration tests only.
 	if os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != "" {
@ -834,12 +841,12 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
|
|||
}
|
||||
d.setupDumpStackTrap(stackDumpDir)
|
||||
|
||||
if err := d.setupSeccompProfile(); err != nil {
|
||||
if err := d.setupSeccompProfile(config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set the default isolation mode (only applicable on Windows)
|
||||
if err := d.setDefaultIsolation(); err != nil {
|
||||
if err := d.setDefaultIsolation(config); err != nil {
|
||||
return nil, fmt.Errorf("error setting default isolation mode: %v", err)
|
||||
}
|
||||
|
||||
|
@ -881,7 +888,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
|
|||
d.registryService = registryService
|
||||
dlogger.RegisterPluginGetter(d.PluginStore)
|
||||
|
||||
metricsSockPath, err := d.listenMetricsSock()
|
||||
metricsSockPath, err := d.listenMetricsSock(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -942,7 +949,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
|
|||
shimOpts interface{}
|
||||
)
|
||||
if runtime.GOOS != "windows" {
|
||||
shim, shimOpts, err = d.getRuntime(config.GetDefaultRuntimeName())
|
||||
shim, shimOpts, err = d.getRuntime(config, config.DefaultRuntime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -965,9 +972,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
|
|||
return nil, errors.Wrap(err, "couldn't create plugin manager")
|
||||
}
|
||||
|
||||
if err := d.setupDefaultLogConfig(); err != nil {
|
||||
return nil, err
|
||||
d.defaultLogConfig, err = defaultLogConfig(config)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to set log opts")
|
||||
}
|
||||
logrus.Debugf("Using default logging driver %s", d.defaultLogConfig.Type)
|
||||
|
||||
d.volumes, err = volumesservice.NewVolumeService(config.Root, d.PluginStore, rootIDs, d)
|
||||
if err != nil {
|
||||
|
@ -982,7 +991,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
|
|||
// at this point.
|
||||
//
|
||||
// TODO(thaJeztah) add a utility to only collect the CgroupDevicesEnabled information
|
||||
if runtime.GOOS == "linux" && !userns.RunningInUserNS() && !getSysInfo(d).CgroupDevicesEnabled {
|
||||
if runtime.GOOS == "linux" && !userns.RunningInUserNS() && !getSysInfo(config).CgroupDevicesEnabled {
|
||||
return nil, errors.New("Devices cgroup isn't mounted")
|
||||
}
|
||||
|
||||
|
@ -1135,11 +1144,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
|
|||
|
||||
go d.execCommandGC()
|
||||
|
||||
if err := d.initLibcontainerd(ctx); err != nil {
|
||||
if err := d.initLibcontainerd(ctx, config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := d.restore(); err != nil {
|
||||
if err := d.restore(config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
close(d.startupDone)
|
||||
|
@ -1201,7 +1210,11 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error {
|
|||
// A negative (-1) timeout means "indefinitely", which means that containers
|
||||
// are not forcibly killed, and the daemon shuts down after all containers exit.
|
||||
func (daemon *Daemon) ShutdownTimeout() int {
|
||||
shutdownTimeout := daemon.configStore.ShutdownTimeout
|
||||
return daemon.shutdownTimeout(daemon.config())
|
||||
}
|
||||
|
||||
func (daemon *Daemon) shutdownTimeout(cfg *config.Config) int {
|
||||
shutdownTimeout := cfg.ShutdownTimeout
|
||||
if shutdownTimeout < 0 {
|
||||
return -1
|
||||
}
|
||||
|
@ -1228,7 +1241,8 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
|
|||
// Keep mounts and networking running on daemon shutdown if
|
||||
// we are to keep containers running and restore them.
|
||||
|
||||
if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
|
||||
cfg := daemon.config()
|
||||
if cfg.LiveRestoreEnabled && daemon.containers != nil {
|
||||
// check if there are any running containers, if none we should do some cleanup
|
||||
if ls, err := daemon.Containers(ctx, &types.ContainerListOptions{}); len(ls) != 0 || err != nil {
|
||||
// metrics plugins still need some cleanup
|
||||
|
@ -1238,8 +1252,8 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
|
|||
}
|
||||
|
||||
if daemon.containers != nil {
|
||||
logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", daemon.configStore.ShutdownTimeout)
|
||||
logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.ShutdownTimeout())
|
||||
logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", cfg.ShutdownTimeout)
|
||||
logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.shutdownTimeout(cfg))
|
||||
daemon.containers.ApplyAll(func(c *container.Container) {
|
||||
if !c.IsRunning() {
|
||||
return
|
||||
|
@ -1293,7 +1307,7 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
|
|||
daemon.mdDB.Close()
|
||||
}
|
||||
|
||||
return daemon.cleanupMounts()
|
||||
return daemon.cleanupMounts(cfg)
|
||||
}
|
||||
|
||||
// Mount sets container.BaseFS
|
||||
|
@ -1374,15 +1388,10 @@ func isBridgeNetworkDisabled(conf *config.Config) bool {
|
|||
return conf.BridgeConfig.Iface == config.DisableNetworkBridge
|
||||
}
|
||||
|
||||
func (daemon *Daemon) networkOptions(pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
|
||||
options := []nwconfig.Option{}
|
||||
if daemon.configStore == nil {
|
||||
return options, nil
|
||||
}
|
||||
conf := daemon.configStore
|
||||
func (daemon *Daemon) networkOptions(conf *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
|
||||
dd := runconfig.DefaultDaemonNetworkMode()
|
||||
|
||||
options = []nwconfig.Option{
|
||||
options := []nwconfig.Option{
|
||||
nwconfig.OptionDataDir(conf.Root),
|
||||
nwconfig.OptionExecRoot(conf.GetExecRoot()),
|
||||
nwconfig.OptionDefaultDriver(string(dd)),
|
||||
|
@ -1514,7 +1523,7 @@ func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo {
|
|||
// We check if sysInfo is not set here, to allow some test to
|
||||
// override the actual sysInfo.
|
||||
if daemon.sysInfo == nil {
|
||||
daemon.sysInfo = getSysInfo(daemon)
|
||||
daemon.sysInfo = getSysInfo(daemon.config())
|
||||
}
|
||||
})
|
||||
|
||||
|
|
|
@ -76,7 +76,7 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u
|
|||
}
|
||||
|
||||
// cleanupMounts umounts used by container resources and the daemon root mount
|
||||
func (daemon *Daemon) cleanupMounts() error {
|
||||
func (daemon *Daemon) cleanupMounts(cfg *config.Config) error {
|
||||
if err := daemon.cleanupMountsByID(""); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -100,7 +100,7 @@ func (daemon *Daemon) cleanupMounts() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
unmountFile := getUnmountOnShutdownPath(daemon.configStore)
|
||||
unmountFile := getUnmountOnShutdownPath(cfg)
|
||||
if _, err := os.Stat(unmountFile); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -239,14 +239,14 @@ func kernelSupportsRecursivelyReadOnly() error {
|
|||
return kernelSupportsRROErr
|
||||
}
|
||||
|
||||
func (daemon *Daemon) supportsRecursivelyReadOnly(runtime string) error {
|
||||
func supportsRecursivelyReadOnly(cfg *config.Config, runtime string) error {
|
||||
if err := kernelSupportsRecursivelyReadOnly(); err != nil {
|
||||
return fmt.Errorf("rro is not supported: %w (kernel is older than 5.12?)", err)
|
||||
}
|
||||
if runtime == "" {
|
||||
runtime = daemon.configStore.GetDefaultRuntimeName()
|
||||
runtime = cfg.DefaultRuntime
|
||||
}
|
||||
rt := daemon.configStore.GetRuntime(runtime)
|
||||
rt := cfg.GetRuntime(runtime)
|
||||
if rt.Features == nil {
|
||||
return fmt.Errorf("rro is not supported by runtime %q: OCI features struct is not available", runtime)
|
||||
}
|
||||
|
|
|
@ -178,7 +178,7 @@ func TestNotCleanupMounts(t *testing.T) {
|
|||
func TestValidateContainerIsolationLinux(t *testing.T) {
|
||||
d := Daemon{}
|
||||
|
||||
_, err := d.verifyContainerSettings(&containertypes.HostConfig{Isolation: containertypes.IsolationHyperV}, nil, false)
|
||||
_, err := d.verifyContainerSettings(&config.Config{}, &containertypes.HostConfig{Isolation: containertypes.IsolationHyperV}, nil, false)
|
||||
assert.Check(t, is.Error(err, "invalid isolation 'hyperv' on linux"))
|
||||
}
|
||||
|
||||
|
@ -264,7 +264,8 @@ func TestRootMountCleanup(t *testing.T) {
|
|||
err = os.Mkdir(cfg.Root, 0755)
|
||||
assert.NilError(t, err)
|
||||
|
||||
d := &Daemon{configStore: cfg, root: cfg.Root}
|
||||
d := &Daemon{root: cfg.Root}
|
||||
d.configStore.Store(cfg)
|
||||
unmountFile := getUnmountOnShutdownPath(cfg)
|
||||
|
||||
t.Run("regular dir no mountpoint", func(t *testing.T) {
|
||||
|
@ -274,7 +275,7 @@ func TestRootMountCleanup(t *testing.T) {
|
|||
assert.NilError(t, err)
|
||||
checkMounted(t, cfg.Root, true)
|
||||
|
||||
assert.Assert(t, d.cleanupMounts())
|
||||
assert.Assert(t, d.cleanupMounts(cfg))
|
||||
checkMounted(t, cfg.Root, false)
|
||||
|
||||
_, err = os.Stat(unmountFile)
|
||||
|
@ -292,7 +293,7 @@ func TestRootMountCleanup(t *testing.T) {
|
|||
|
||||
_, err = os.Stat(unmountFile)
|
||||
assert.Assert(t, os.IsNotExist(err))
|
||||
assert.Assert(t, d.cleanupMounts())
|
||||
assert.Assert(t, d.cleanupMounts(cfg))
|
||||
checkMounted(t, cfg.Root, true)
|
||||
})
|
||||
|
||||
|
@ -309,7 +310,7 @@ func TestRootMountCleanup(t *testing.T) {
|
|||
t.Fatal("unmount file should not exist")
|
||||
}
|
||||
|
||||
assert.Assert(t, d.cleanupMounts())
|
||||
assert.Assert(t, d.cleanupMounts(cfg))
|
||||
checkMounted(t, cfg.Root, true)
|
||||
assert.Assert(t, mount.Unmount(cfg.Root))
|
||||
})
|
||||
|
@ -328,7 +329,7 @@ func TestRootMountCleanup(t *testing.T) {
|
|||
_, err = os.Stat(unmountFile)
|
||||
assert.Check(t, os.IsNotExist(err), err)
|
||||
checkMounted(t, cfg.Root, false)
|
||||
assert.Assert(t, d.cleanupMounts())
|
||||
assert.Assert(t, d.cleanupMounts(cfg))
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/libnetwork"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
|
@ -300,7 +301,7 @@ func TestMerge(t *testing.T) {
|
|||
func TestValidateContainerIsolation(t *testing.T) {
|
||||
d := Daemon{}
|
||||
|
||||
_, err := d.verifyContainerSettings(&containertypes.HostConfig{Isolation: containertypes.Isolation("invalid")}, nil, false)
|
||||
_, err := d.verifyContainerSettings(&config.Config{}, &containertypes.HostConfig{Isolation: containertypes.Isolation("invalid")}, nil, false)
|
||||
assert.Check(t, is.Error(err, "invalid isolation 'invalid' on "+runtime.GOOS))
|
||||
}
|
||||
|
||||
|
|
|
@ -189,8 +189,8 @@ func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeight
|
|||
return blkioWeightDevices, nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) parseSecurityOpt(securityOptions *container.SecurityOptions, hostConfig *containertypes.HostConfig) error {
|
||||
securityOptions.NoNewPrivileges = daemon.configStore.NoNewPrivileges
|
||||
func (daemon *Daemon) parseSecurityOpt(cfg *config.Config, securityOptions *container.SecurityOptions, hostConfig *containertypes.HostConfig) error {
|
||||
securityOptions.NoNewPrivileges = cfg.NoNewPrivileges
|
||||
return parseSecurityOpt(securityOptions, hostConfig)
|
||||
}
|
||||
|
||||
|
@ -299,7 +299,7 @@ func adjustParallelLimit(n int, limit int) int {
|
|||
|
||||
// adaptContainerSettings is called during container creation to modify any
|
||||
// settings necessary in the HostConfig structure.
|
||||
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
|
||||
func (daemon *Daemon) adaptContainerSettings(daemonCfg *config.Config, hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
|
||||
if adjustCPUShares && hostConfig.CPUShares > 0 {
|
||||
// Handle unsupported CPUShares
|
||||
if hostConfig.CPUShares < linuxMinCPUShares {
|
||||
|
@ -316,15 +316,15 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf
|
|||
}
|
||||
if hostConfig.ShmSize == 0 {
|
||||
hostConfig.ShmSize = config.DefaultShmSize
|
||||
if daemon.configStore != nil {
|
||||
hostConfig.ShmSize = int64(daemon.configStore.ShmSize)
|
||||
if daemonCfg != nil {
|
||||
hostConfig.ShmSize = int64(daemonCfg.ShmSize)
|
||||
}
|
||||
}
|
||||
// Set default IPC mode, if unset for container
|
||||
if hostConfig.IpcMode.IsEmpty() {
|
||||
m := config.DefaultIpcMode
|
||||
if daemon.configStore != nil {
|
||||
m = containertypes.IpcMode(daemon.configStore.IpcMode)
|
||||
if daemonCfg != nil {
|
||||
m = containertypes.IpcMode(daemonCfg.IpcMode)
|
||||
}
|
||||
hostConfig.IpcMode = m
|
||||
}
|
||||
|
@ -340,8 +340,8 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf
|
|||
if cgroups.Mode() == cgroups.Unified {
|
||||
m = containertypes.CgroupnsModePrivate
|
||||
}
|
||||
if daemon.configStore != nil {
|
||||
m = containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode)
|
||||
if daemonCfg != nil {
|
||||
m = containertypes.CgroupnsMode(daemonCfg.CgroupNamespaceMode)
|
||||
}
|
||||
hostConfig.CgroupnsMode = m
|
||||
}
|
||||
|
@ -566,11 +566,11 @@ func verifyPlatformContainerResources(resources *containertypes.Resources, sysIn
|
|||
return warnings, nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) getCgroupDriver() string {
|
||||
if UsingSystemd(daemon.configStore) {
|
||||
func cgroupDriver(cfg *config.Config) string {
|
||||
if UsingSystemd(cfg) {
|
||||
return cgroupSystemdDriver
|
||||
}
|
||||
if daemon.Rootless() {
|
||||
if cfg.Rootless {
|
||||
return cgroupNoneDriver
|
||||
}
|
||||
return cgroupFsDriver
|
||||
|
@ -639,7 +639,7 @@ func isRunningSystemd() bool {
|
|||
|
||||
// verifyPlatformContainerSettings performs platform-specific validation of the
|
||||
// hostconfig and config structures.
|
||||
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
|
||||
func verifyPlatformContainerSettings(daemon *Daemon, daemonCfg *config.Config, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
|
||||
if hostConfig == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -680,7 +680,7 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.
|
|||
}
|
||||
|
||||
// check for various conflicting options with user namespaces
|
||||
if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() {
|
||||
if daemonCfg.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() {
|
||||
if hostConfig.Privileged {
|
||||
return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. You must run the container in the host namespace when running privileged mode")
|
||||
}
|
||||
|
@ -691,17 +691,17 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.
|
|||
return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled")
|
||||
}
|
||||
}
|
||||
if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) {
|
||||
if hostConfig.CgroupParent != "" && UsingSystemd(daemonCfg) {
|
||||
// CgroupParent for systemd cgroup should be named as "xxx.slice"
|
||||
if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") {
|
||||
return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
|
||||
}
|
||||
}
|
||||
if hostConfig.Runtime == "" {
|
||||
hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
|
||||
hostConfig.Runtime = daemonCfg.DefaultRuntime
|
||||
}
|
||||
|
||||
if _, _, err := daemon.getRuntime(hostConfig.Runtime); err != nil {
|
||||
if _, _, err := daemon.getRuntime(daemonCfg, hostConfig.Runtime); err != nil {
|
||||
return warnings, err
|
||||
}
|
||||
|
||||
|
@ -756,7 +756,7 @@ func verifyDaemonSettings(conf *config.Config) error {
|
|||
}
|
||||
|
||||
configureRuntimes(conf)
|
||||
if rtName := conf.GetDefaultRuntimeName(); rtName != "" {
|
||||
if rtName := conf.DefaultRuntime; rtName != "" {
|
||||
if conf.GetRuntime(rtName) == nil {
|
||||
if !config.IsPermissibleC8dRuntimeName(rtName) {
|
||||
return fmt.Errorf("specified default runtime '%s' does not exist", rtName)
|
||||
|
@ -837,8 +837,8 @@ func configureKernelSecuritySupport(config *config.Config, driverName string) er
|
|||
// initNetworkController initializes the libnetwork controller and configures
|
||||
// network settings. If there's active sandboxes, configuration changes will not
|
||||
// take effect.
|
||||
func (daemon *Daemon) initNetworkController(activeSandboxes map[string]interface{}) error {
|
||||
netOptions, err := daemon.networkOptions(daemon.PluginStore, activeSandboxes)
|
||||
func (daemon *Daemon) initNetworkController(cfg *config.Config, activeSandboxes map[string]interface{}) error {
|
||||
netOptions, err := daemon.networkOptions(cfg, daemon.PluginStore, activeSandboxes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -850,12 +850,12 @@ func (daemon *Daemon) initNetworkController(activeSandboxes map[string]interface
|
|||
|
||||
if len(activeSandboxes) > 0 {
|
||||
logrus.Info("there are running containers, updated network configuration will not take affect")
|
||||
} else if err := configureNetworking(daemon.netController, daemon.configStore); err != nil {
|
||||
} else if err := configureNetworking(daemon.netController, cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set HostGatewayIP to the default bridge's IP if it is empty
|
||||
setHostGatewayIP(daemon.netController, daemon.configStore)
|
||||
setHostGatewayIP(daemon.netController, cfg)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1410,7 +1410,7 @@ func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container
|
|||
|
||||
// setDefaultIsolation determines the default isolation mode for the
|
||||
// daemon to run in. This is only applicable on Windows
|
||||
func (daemon *Daemon) setDefaultIsolation() error {
|
||||
func (daemon *Daemon) setDefaultIsolation(*config.Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1443,14 +1443,14 @@ func setMayDetachMounts() error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (daemon *Daemon) initCPURtController(mnt, path string) error {
|
||||
func (daemon *Daemon) initCPURtController(cfg *config.Config, mnt, path string) error {
|
||||
if path == "/" || path == "." {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Recursively create cgroup to ensure that the system and all parent cgroups have values set
|
||||
// for the period and runtime as this limits what the children can be set to.
|
||||
if err := daemon.initCPURtController(mnt, filepath.Dir(path)); err != nil {
|
||||
if err := daemon.initCPURtController(cfg, mnt, filepath.Dir(path)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -1458,10 +1458,10 @@ func (daemon *Daemon) initCPURtController(mnt, path string) error {
|
|||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
|
||||
if err := maybeCreateCPURealTimeFile(cfg.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
|
||||
return err
|
||||
}
|
||||
return maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path)
|
||||
return maybeCreateCPURealTimeFile(cfg.CPURealtimeRuntime, "cpu.rt_runtime_us", path)
|
||||
}
|
||||
|
||||
func maybeCreateCPURealTimeFile(configValue int64, file string, path string) error {
|
||||
|
@ -1471,8 +1471,8 @@ func maybeCreateCPURealTimeFile(configValue int64, file string, path string) err
|
|||
return os.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700)
|
||||
}
|
||||
|
||||
func (daemon *Daemon) setupSeccompProfile() error {
|
||||
switch profile := daemon.configStore.SeccompProfile; profile {
|
||||
func (daemon *Daemon) setupSeccompProfile(cfg *config.Config) error {
|
||||
switch profile := cfg.SeccompProfile; profile {
|
||||
case "", config.SeccompProfileDefault:
|
||||
daemon.seccompProfilePath = config.SeccompProfileDefault
|
||||
case config.SeccompProfileUnconfined:
|
||||
|
@ -1488,9 +1488,9 @@ func (daemon *Daemon) setupSeccompProfile() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func getSysInfo(daemon *Daemon) *sysinfo.SysInfo {
|
||||
func getSysInfo(cfg *config.Config) *sysinfo.SysInfo {
|
||||
var siOpts []sysinfo.Opt
|
||||
if daemon.getCgroupDriver() == cgroupSystemdDriver {
|
||||
if cgroupDriver(cfg) == cgroupSystemdDriver {
|
||||
if euid := os.Getenv("ROOTLESSKIT_PARENT_EUID"); euid != "" {
|
||||
siOpts = append(siOpts, sysinfo.WithCgroup2GroupPath("/user.slice/user-"+euid+".slice"))
|
||||
}
|
||||
|
@ -1498,13 +1498,13 @@ func getSysInfo(daemon *Daemon) *sysinfo.SysInfo {
|
|||
return sysinfo.New(siOpts...)
|
||||
}
|
||||
|
||||
func (daemon *Daemon) initLibcontainerd(ctx context.Context) error {
|
||||
func (daemon *Daemon) initLibcontainerd(ctx context.Context, cfg *config.Config) error {
|
||||
var err error
|
||||
daemon.containerd, err = remote.NewClient(
|
||||
ctx,
|
||||
daemon.containerdCli,
|
||||
filepath.Join(daemon.configStore.ExecRoot, "containerd"),
|
||||
daemon.configStore.ContainerdNamespace,
|
||||
filepath.Join(cfg.ExecRoot, "containerd"),
|
||||
cfg.ContainerdNamespace,
|
||||
daemon,
|
||||
)
|
||||
return err
|
||||
|
|
|
@ -68,30 +68,31 @@ func TestAdjustCPUShares(t *testing.T) {
|
|||
repository: tmp,
|
||||
root: tmp,
|
||||
}
|
||||
cfg := &config.Config{}
|
||||
muteLogs()
|
||||
|
||||
hostConfig := &containertypes.HostConfig{
|
||||
Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1},
|
||||
}
|
||||
daemon.adaptContainerSettings(hostConfig, true)
|
||||
daemon.adaptContainerSettings(cfg, hostConfig, true)
|
||||
if hostConfig.CPUShares != linuxMinCPUShares {
|
||||
t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares)
|
||||
}
|
||||
|
||||
hostConfig.CPUShares = linuxMaxCPUShares + 1
|
||||
daemon.adaptContainerSettings(hostConfig, true)
|
||||
daemon.adaptContainerSettings(cfg, hostConfig, true)
|
||||
if hostConfig.CPUShares != linuxMaxCPUShares {
|
||||
t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares)
|
||||
}
|
||||
|
||||
hostConfig.CPUShares = 0
|
||||
daemon.adaptContainerSettings(hostConfig, true)
|
||||
daemon.adaptContainerSettings(cfg, hostConfig, true)
|
||||
if hostConfig.CPUShares != 0 {
|
||||
t.Error("Expected CPUShares to be unchanged")
|
||||
}
|
||||
|
||||
hostConfig.CPUShares = 1024
|
||||
daemon.adaptContainerSettings(hostConfig, true)
|
||||
daemon.adaptContainerSettings(cfg, hostConfig, true)
|
||||
if hostConfig.CPUShares != 1024 {
|
||||
t.Error("Expected CPUShares to be unchanged")
|
||||
}
|
||||
|
@ -108,29 +109,30 @@ func TestAdjustCPUSharesNoAdjustment(t *testing.T) {
|
|||
repository: tmp,
|
||||
root: tmp,
|
||||
}
|
||||
cfg := &config.Config{}
|
||||
|
||||
hostConfig := &containertypes.HostConfig{
|
||||
Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1},
|
||||
}
|
||||
daemon.adaptContainerSettings(hostConfig, false)
|
||||
daemon.adaptContainerSettings(cfg, hostConfig, false)
|
||||
if hostConfig.CPUShares != linuxMinCPUShares-1 {
|
||||
t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares-1)
|
||||
}
|
||||
|
||||
hostConfig.CPUShares = linuxMaxCPUShares + 1
|
||||
daemon.adaptContainerSettings(hostConfig, false)
|
||||
daemon.adaptContainerSettings(cfg, hostConfig, false)
|
||||
if hostConfig.CPUShares != linuxMaxCPUShares+1 {
|
||||
t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares+1)
|
||||
}
|
||||
|
||||
hostConfig.CPUShares = 0
|
||||
daemon.adaptContainerSettings(hostConfig, false)
|
||||
daemon.adaptContainerSettings(cfg, hostConfig, false)
|
||||
if hostConfig.CPUShares != 0 {
|
||||
t.Error("Expected CPUShares to be unchanged")
|
||||
}
|
||||
|
||||
hostConfig.CPUShares = 1024
|
||||
daemon.adaptContainerSettings(hostConfig, false)
|
||||
daemon.adaptContainerSettings(cfg, hostConfig, false)
|
||||
if hostConfig.CPUShares != 1024 {
|
||||
t.Error("Expected CPUShares to be unchanged")
|
||||
}
|
||||
|
@ -243,16 +245,16 @@ func TestParseSecurityOpt(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestParseNNPSecurityOptions(t *testing.T) {
|
||||
daemon := &Daemon{
|
||||
configStore: &config.Config{NoNewPrivileges: true},
|
||||
}
|
||||
daemonCfg := &config.Config{NoNewPrivileges: true}
|
||||
daemon := &Daemon{}
|
||||
daemon.configStore.Store(daemonCfg)
|
||||
opts := &container.SecurityOptions{}
|
||||
cfg := &containertypes.HostConfig{}
|
||||
|
||||
// test NNP when "daemon:true" and "no-new-privileges=false""
|
||||
cfg.SecurityOpt = []string{"no-new-privileges=false"}
|
||||
|
||||
if err := daemon.parseSecurityOpt(opts, cfg); err != nil {
|
||||
if err := daemon.parseSecurityOpt(daemonCfg, opts, cfg); err != nil {
|
||||
t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
|
||||
}
|
||||
if opts.NoNewPrivileges {
|
||||
|
@ -260,10 +262,10 @@ func TestParseNNPSecurityOptions(t *testing.T) {
|
|||
}
|
||||
|
||||
// test NNP when "daemon:false" and "no-new-privileges=true""
|
||||
daemon.configStore.NoNewPrivileges = false
|
||||
daemonCfg.NoNewPrivileges = false
|
||||
cfg.SecurityOpt = []string{"no-new-privileges=true"}
|
||||
|
||||
if err := daemon.parseSecurityOpt(opts, cfg); err != nil {
|
||||
if err := daemon.parseSecurityOpt(daemonCfg, opts, cfg); err != nil {
|
||||
t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err)
|
||||
}
|
||||
if !opts.NoNewPrivileges {
|
||||
|
|
|
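The test hunks above call daemon.configStore.Store(daemonCfg) and, elsewhere in this diff, daemon.config(), but neither declaration appears in this excerpt. Going by the read-copy-update scheme described in the commit message, they presumably amount to an atomic pointer plus a snapshot accessor along the following lines; this is a sketch for orientation, not the exact code from this commit.

package daemon

import (
	"sync"
	"sync/atomic"

	"github.com/docker/docker/daemon/config"
)

// Daemon (abridged). configStore holds a pointer to the current, immutable
// daemon configuration; readers load it once and keep using that snapshot,
// while Reload swaps in a pointer to a mutated deep copy, so the read path
// needs no locking.
type Daemon struct {
	configStore  atomic.Pointer[config.Config]
	configReload sync.Mutex // serializes writers (Reload); readers never take it
}

// config returns the current snapshot of the daemon configuration. Callers
// capture it once per operation and pass it down to keep a consistent view.
func (daemon *Daemon) config() *config.Config {
	if cfg := daemon.configStore.Load(); cfg != nil {
		return cfg
	}
	return &config.Config{}
}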
@ -17,7 +17,3 @@ func setupResolvConf(_ *interface{}) {}
|
|||
func getSysInfo(_ *Daemon) *sysinfo.SysInfo {
|
||||
return sysinfo.New()
|
||||
}
|
||||
|
||||
func (daemon *Daemon) supportsRecursivelyReadOnly(_ string) error {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -55,7 +55,7 @@ func getPluginExecRoot(cfg *config.Config) string {
|
|||
return filepath.Join(cfg.Root, "plugins")
|
||||
}
|
||||
|
||||
func (daemon *Daemon) parseSecurityOpt(securityOptions *container.SecurityOptions, hostConfig *containertypes.HostConfig) error {
|
||||
func (daemon *Daemon) parseSecurityOpt(daemonCfg *config.Config, securityOptions *container.SecurityOptions, hostConfig *containertypes.HostConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -65,7 +65,7 @@ func setupInitLayer(idMapping idtools.IdentityMapping) func(string) error {
|
|||
|
||||
// adaptContainerSettings is called during container creation to modify any
|
||||
// settings necessary in the HostConfig structure.
|
||||
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
|
||||
func (daemon *Daemon) adaptContainerSettings(daemonCfg *config.Config, hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -171,7 +171,7 @@ func verifyPlatformContainerResources(resources *containertypes.Resources, isHyp
|
|||
|
||||
// verifyPlatformContainerSettings performs platform-specific validation of the
|
||||
// hostconfig and config structures.
|
||||
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
|
||||
func verifyPlatformContainerSettings(daemon *Daemon, daemonCfg *config.Config, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
|
||||
if hostConfig == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -232,8 +232,8 @@ func configureMaxThreads(config *config.Config) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) initNetworkController(activeSandboxes map[string]interface{}) error {
|
||||
netOptions, err := daemon.networkOptions(nil, nil)
|
||||
func (daemon *Daemon) initNetworkController(daemonCfg *config.Config, activeSandboxes map[string]interface{}) error {
|
||||
netOptions, err := daemon.networkOptions(daemonCfg, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -396,9 +396,9 @@ func (daemon *Daemon) initNetworkController(activeSandboxes map[string]interface
|
|||
}
|
||||
}
|
||||
|
||||
if !daemon.configStore.DisableBridge {
|
||||
if !daemonCfg.DisableBridge {
|
||||
// Initialize default driver "bridge"
|
||||
if err := initBridgeDriver(daemon.netController, daemon.configStore); err != nil {
|
||||
if err := initBridgeDriver(daemon.netController, daemonCfg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -452,7 +452,7 @@ func (daemon *Daemon) cleanupMountsByID(in string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) cleanupMounts() error {
|
||||
func (daemon *Daemon) cleanupMounts(*config.Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -512,7 +512,7 @@ func driverOptions(_ *config.Config) nwconfig.Option {
|
|||
|
||||
// setDefaultIsolation determine the default isolation mode for the
|
||||
// daemon to run in. This is only applicable on Windows
|
||||
func (daemon *Daemon) setDefaultIsolation() error {
|
||||
func (daemon *Daemon) setDefaultIsolation(config *config.Config) error {
|
||||
// On client SKUs, default to Hyper-V. @engine maintainers: this
// should not be removed. Ping the Microsoft folks if there are PRs
// to change this.
|
||||
|
@ -521,7 +521,7 @@ func (daemon *Daemon) setDefaultIsolation() error {
|
|||
} else {
|
||||
daemon.defaultIsolation = containertypes.IsolationProcess
|
||||
}
|
||||
for _, option := range daemon.configStore.ExecOptions {
|
||||
for _, option := range config.ExecOptions {
|
||||
key, val, err := parsers.ParseKeyValueOpt(option)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -552,7 +552,7 @@ func setMayDetachMounts() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) setupSeccompProfile() error {
|
||||
func (daemon *Daemon) setupSeccompProfile(*config.Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -562,16 +562,16 @@ func (daemon *Daemon) loadRuntimes() error {
|
|||
|
||||
func setupResolvConf(config *config.Config) {}
|
||||
|
||||
func getSysInfo(daemon *Daemon) *sysinfo.SysInfo {
|
||||
func getSysInfo(*config.Config) *sysinfo.SysInfo {
|
||||
return sysinfo.New()
|
||||
}
|
||||
|
||||
func (daemon *Daemon) initLibcontainerd(ctx context.Context) error {
|
||||
func (daemon *Daemon) initLibcontainerd(ctx context.Context, cfg *config.Config) error {
|
||||
var err error
|
||||
|
||||
rt := daemon.configStore.GetDefaultRuntimeName()
|
||||
rt := cfg.DefaultRuntime
|
||||
if rt == "" {
|
||||
if daemon.configStore.ContainerdAddr == "" {
|
||||
if cfg.ContainerdAddr == "" {
|
||||
rt = windowsV1RuntimeName
|
||||
} else {
|
||||
rt = windowsV2RuntimeName
|
||||
|
@ -583,19 +583,19 @@ func (daemon *Daemon) initLibcontainerd(ctx context.Context) error {
|
|||
daemon.containerd, err = local.NewClient(
|
||||
ctx,
|
||||
daemon.containerdCli,
|
||||
filepath.Join(daemon.configStore.ExecRoot, "containerd"),
|
||||
daemon.configStore.ContainerdNamespace,
|
||||
filepath.Join(cfg.ExecRoot, "containerd"),
|
||||
cfg.ContainerdNamespace,
|
||||
daemon,
|
||||
)
|
||||
case windowsV2RuntimeName:
|
||||
if daemon.configStore.ContainerdAddr == "" {
|
||||
if cfg.ContainerdAddr == "" {
|
||||
return fmt.Errorf("cannot use the specified runtime %q without containerd", rt)
|
||||
}
|
||||
daemon.containerd, err = remote.NewClient(
|
||||
ctx,
|
||||
daemon.containerdCli,
|
||||
filepath.Join(daemon.configStore.ExecRoot, "containerd"),
|
||||
daemon.configStore.ContainerdNamespace,
|
||||
filepath.Join(cfg.ExecRoot, "containerd"),
|
||||
cfg.ContainerdNamespace,
|
||||
daemon,
|
||||
)
|
||||
default:
|
||||
|
@ -604,7 +604,3 @@ func (daemon *Daemon) initLibcontainerd(ctx context.Context) error {
|
|||
|
||||
return err
|
||||
}
|
||||
|
||||
func (daemon *Daemon) supportsRecursivelyReadOnly(_ string) error {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/docker/docker/api/types"
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
|
@ -24,6 +25,10 @@ import (
|
|||
// fails. If the remove succeeds, the container name is released, and
|
||||
// network links are removed.
|
||||
func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error {
|
||||
return daemon.containerRm(daemon.config(), name, config)
|
||||
}
|
||||
|
||||
func (daemon *Daemon) containerRm(cfg *config.Config, name string, opts *types.ContainerRmConfig) error {
|
||||
start := time.Now()
|
||||
ctr, err := daemon.GetContainer(name)
|
||||
if err != nil {
|
||||
|
@ -42,17 +47,17 @@ func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig)
|
|||
return nil
|
||||
}
|
||||
|
||||
if config.RemoveLink {
|
||||
return daemon.rmLink(ctr, name)
|
||||
if opts.RemoveLink {
|
||||
return daemon.rmLink(cfg, ctr, name)
|
||||
}
|
||||
|
||||
err = daemon.cleanupContainer(ctr, *config)
|
||||
err = daemon.cleanupContainer(ctr, *opts)
|
||||
containerActions.WithValues("delete").UpdateSince(start)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (daemon *Daemon) rmLink(container *container.Container, name string) error {
|
||||
func (daemon *Daemon) rmLink(cfg *config.Config, container *container.Container, name string) error {
|
||||
if name[0] != '/' {
|
||||
name = "/" + name
|
||||
}
|
||||
|
@ -71,7 +76,7 @@ func (daemon *Daemon) rmLink(container *container.Container, name string) error
|
|||
parentContainer, _ := daemon.GetContainer(pe)
|
||||
if parentContainer != nil {
|
||||
daemon.linkIndex.unlink(name, container, parentContainer)
|
||||
if err := daemon.updateNetwork(parentContainer); err != nil {
|
||||
if err := daemon.updateNetwork(cfg, parentContainer); err != nil {
|
||||
logrus.Debugf("Could not update network to remove link %s: %v", n, err)
|
||||
}
|
||||
}
|
||||
|
|
|
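The delete.go hunk above introduces the pattern used for exported entry points throughout this change: the public method takes the snapshot once via daemon.config() and hands it to an unexported implementation, so internal callers that already hold a snapshot (autoRemove and ContainersPrune later in this diff) can pass theirs instead. A minimal sketch of the shape, with a hypothetical doSomething standing in for containerRm and building on the abridged types sketched earlier:

// DoSomething is the exported entry point; it captures the configuration
// snapshot exactly once for the whole operation.
func (daemon *Daemon) DoSomething(name string) error {
	return daemon.doSomething(daemon.config(), name)
}

// doSomething reads only from cfg, never from daemon.configStore, so the
// operation sees a single consistent view of the configuration even if a
// concurrent reload swaps the store mid-call.
func (daemon *Daemon) doSomething(cfg *config.Config, name string) error {
	if cfg.LiveRestoreEnabled {
		// behaviour that must stay consistent for the whole operation
	}
	return nil
}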
@ -252,7 +252,8 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, optio
|
|||
p.Cwd = "/"
|
||||
}
|
||||
|
||||
if err := daemon.execSetPlatformOpt(ctx, ec, p); err != nil {
|
||||
daemonCfg := daemon.config()
|
||||
if err := daemon.execSetPlatformOpt(ctx, daemonCfg, ec, p); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
coci "github.com/containerd/containerd/oci"
|
||||
"github.com/containerd/containerd/pkg/apparmor"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/oci/caps"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
@ -50,7 +51,7 @@ func getUserFromContainerd(ctx context.Context, containerdCli *containerd.Client
|
|||
return spec.Process.User, nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) execSetPlatformOpt(ctx context.Context, ec *container.ExecConfig, p *specs.Process) error {
|
||||
func (daemon *Daemon) execSetPlatformOpt(ctx context.Context, daemonCfg *config.Config, ec *container.ExecConfig, p *specs.Process) error {
|
||||
if len(ec.User) > 0 {
|
||||
var err error
|
||||
if daemon.UsesSnapshotter() {
|
||||
|
@ -100,5 +101,5 @@ func (daemon *Daemon) execSetPlatformOpt(ctx context.Context, ec *container.Exec
|
|||
p.ApparmorProfile = appArmorProfile
|
||||
}
|
||||
s := &specs.Spec{Process: p}
|
||||
return WithRlimits(daemon, ec.Container)(ctx, nil, nil, s)
|
||||
return withRlimits(daemon, daemonCfg, ec.Container)(ctx, nil, nil, s)
|
||||
}
|
||||
|
|
|
@ -50,7 +50,9 @@ func TestExecSetPlatformOptAppArmor(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
d := &Daemon{configStore: &config.Config{}}
|
||||
cfg := &config.Config{}
|
||||
d := &Daemon{}
|
||||
d.configStore.Store(cfg)
|
||||
|
||||
// Currently, `docker exec --privileged` inherits the Privileged configuration
|
||||
// of the container, and does not disable AppArmor.
|
||||
|
@ -81,7 +83,7 @@ func TestExecSetPlatformOptAppArmor(t *testing.T) {
|
|||
ec := &container.ExecConfig{Container: c, Privileged: execPrivileged}
|
||||
p := &specs.Process{}
|
||||
|
||||
err := d.execSetPlatformOpt(context.Background(), ec, p)
|
||||
err := d.execSetPlatformOpt(context.Background(), cfg, ec, p)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, p.ApparmorProfile, tc.expectedProfile)
|
||||
})
|
||||
|
|
|
@ -4,10 +4,11 @@ import (
|
|||
"context"
|
||||
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
||||
func (daemon *Daemon) execSetPlatformOpt(ctx context.Context, ec *container.ExecConfig, p *specs.Process) error {
|
||||
func (daemon *Daemon) execSetPlatformOpt(ctx context.Context, daemonCfg *config.Config, ec *container.ExecConfig, p *specs.Process) error {
|
||||
if ec.Container.OS == "windows" {
|
||||
p.User.Username = ec.User
|
||||
}
|
||||
|
|
|
@ -30,6 +30,7 @@ func (daemon *Daemon) SystemInfo() *types.Info {
|
|||
defer metrics.StartTimer(hostInfoFunctions.WithValues("system_info"))()
|
||||
|
||||
sysInfo := daemon.RawSysInfo()
|
||||
cfg := daemon.config()
|
||||
|
||||
v := &types.Info{
|
||||
ID: daemon.id,
|
||||
|
@ -50,27 +51,27 @@ func (daemon *Daemon) SystemInfo() *types.Info {
|
|||
NCPU: sysinfo.NumCPU(),
|
||||
MemTotal: memInfo().MemTotal,
|
||||
GenericResources: daemon.genericResources,
|
||||
DockerRootDir: daemon.configStore.Root,
|
||||
Labels: daemon.configStore.Labels,
|
||||
ExperimentalBuild: daemon.configStore.Experimental,
|
||||
DockerRootDir: cfg.Root,
|
||||
Labels: cfg.Labels,
|
||||
ExperimentalBuild: cfg.Experimental,
|
||||
ServerVersion: dockerversion.Version,
|
||||
HTTPProxy: config.MaskCredentials(getConfigOrEnv(daemon.configStore.HTTPProxy, "HTTP_PROXY", "http_proxy")),
|
||||
HTTPSProxy: config.MaskCredentials(getConfigOrEnv(daemon.configStore.HTTPSProxy, "HTTPS_PROXY", "https_proxy")),
|
||||
NoProxy: getConfigOrEnv(daemon.configStore.NoProxy, "NO_PROXY", "no_proxy"),
|
||||
LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled,
|
||||
HTTPProxy: config.MaskCredentials(getConfigOrEnv(cfg.HTTPProxy, "HTTP_PROXY", "http_proxy")),
|
||||
HTTPSProxy: config.MaskCredentials(getConfigOrEnv(cfg.HTTPSProxy, "HTTPS_PROXY", "https_proxy")),
|
||||
NoProxy: getConfigOrEnv(cfg.NoProxy, "NO_PROXY", "no_proxy"),
|
||||
LiveRestoreEnabled: cfg.LiveRestoreEnabled,
|
||||
Isolation: daemon.defaultIsolation,
|
||||
}
|
||||
|
||||
daemon.fillContainerStates(v)
|
||||
daemon.fillDebugInfo(v)
|
||||
daemon.fillAPIInfo(v)
|
||||
daemon.fillAPIInfo(v, cfg)
|
||||
// Retrieve platform specific info
|
||||
daemon.fillPlatformInfo(v, sysInfo)
|
||||
daemon.fillPlatformInfo(v, sysInfo, cfg)
|
||||
daemon.fillDriverInfo(v)
|
||||
daemon.fillPluginsInfo(v)
|
||||
daemon.fillSecurityOptions(v, sysInfo)
|
||||
daemon.fillPluginsInfo(v, cfg)
|
||||
daemon.fillSecurityOptions(v, sysInfo, cfg)
|
||||
daemon.fillLicense(v)
|
||||
daemon.fillDefaultAddressPools(v)
|
||||
daemon.fillDefaultAddressPools(v, cfg)
|
||||
|
||||
return v
|
||||
}
|
||||
|
@ -80,6 +81,7 @@ func (daemon *Daemon) SystemVersion() types.Version {
|
|||
defer metrics.StartTimer(hostInfoFunctions.WithValues("system_version"))()
|
||||
|
||||
kernelVersion := kernelVersion()
|
||||
cfg := daemon.config()
|
||||
|
||||
v := types.Version{
|
||||
Components: []types.ComponentVersion{
|
||||
|
@ -95,7 +97,7 @@ func (daemon *Daemon) SystemVersion() types.Version {
|
|||
"Arch": runtime.GOARCH,
|
||||
"BuildTime": dockerversion.BuildTime,
|
||||
"KernelVersion": kernelVersion,
|
||||
"Experimental": fmt.Sprintf("%t", daemon.configStore.Experimental),
|
||||
"Experimental": fmt.Sprintf("%t", cfg.Experimental),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -110,12 +112,12 @@ func (daemon *Daemon) SystemVersion() types.Version {
|
|||
Arch: runtime.GOARCH,
|
||||
BuildTime: dockerversion.BuildTime,
|
||||
KernelVersion: kernelVersion,
|
||||
Experimental: daemon.configStore.Experimental,
|
||||
Experimental: cfg.Experimental,
|
||||
}
|
||||
|
||||
v.Platform.Name = dockerversion.PlatformName
|
||||
|
||||
daemon.fillPlatformVersion(&v)
|
||||
daemon.fillPlatformVersion(&v, cfg)
|
||||
return v
|
||||
}
|
||||
|
||||
|
@ -135,19 +137,19 @@ WARNING: The %s storage-driver is deprecated, and will be removed in a future re
|
|||
fillDriverWarnings(v)
|
||||
}
|
||||
|
||||
func (daemon *Daemon) fillPluginsInfo(v *types.Info) {
|
||||
func (daemon *Daemon) fillPluginsInfo(v *types.Info, cfg *config.Config) {
|
||||
v.Plugins = types.PluginsInfo{
|
||||
Volume: daemon.volumes.GetDriverList(),
|
||||
Network: daemon.GetNetworkDriverList(),
|
||||
|
||||
// The authorization plugins are returned in the order they are
|
||||
// used as they constitute a request/response modification chain.
|
||||
Authorization: daemon.configStore.AuthorizationPlugins,
|
||||
Authorization: cfg.AuthorizationPlugins,
|
||||
Log: logger.ListDrivers(),
|
||||
}
|
||||
}
|
||||
|
||||
func (daemon *Daemon) fillSecurityOptions(v *types.Info, sysInfo *sysinfo.SysInfo) {
|
||||
func (daemon *Daemon) fillSecurityOptions(v *types.Info, sysInfo *sysinfo.SysInfo, cfg *config.Config) {
|
||||
var securityOptions []string
|
||||
if sysInfo.AppArmor {
|
||||
securityOptions = append(securityOptions, "name=apparmor")
|
||||
|
@ -164,13 +166,13 @@ func (daemon *Daemon) fillSecurityOptions(v *types.Info, sysInfo *sysinfo.SysInf
|
|||
if rootIDs := daemon.idMapping.RootPair(); rootIDs.UID != 0 || rootIDs.GID != 0 {
|
||||
securityOptions = append(securityOptions, "name=userns")
|
||||
}
|
||||
if daemon.Rootless() {
|
||||
if Rootless(cfg) {
|
||||
securityOptions = append(securityOptions, "name=rootless")
|
||||
}
|
||||
if daemon.cgroupNamespacesEnabled(sysInfo) {
|
||||
if cgroupNamespacesEnabled(sysInfo, cfg) {
|
||||
securityOptions = append(securityOptions, "name=cgroupns")
|
||||
}
|
||||
if daemon.noNewPrivileges() {
|
||||
if noNewPrivileges(cfg) {
|
||||
securityOptions = append(securityOptions, "name=no-new-privileges")
|
||||
}
|
||||
|
||||
|
@ -200,13 +202,12 @@ func (daemon *Daemon) fillDebugInfo(v *types.Info) {
|
|||
v.NEventsListener = daemon.EventsService.SubscribersCount()
|
||||
}
|
||||
|
||||
func (daemon *Daemon) fillAPIInfo(v *types.Info) {
|
||||
func (daemon *Daemon) fillAPIInfo(v *types.Info, cfg *config.Config) {
|
||||
const warn string = `
|
||||
Access to the remote API is equivalent to root access on the host. Refer
|
||||
to the 'Docker daemon attack surface' section in the documentation for
|
||||
more information: https://docs.docker.com/go/attack-surface/`
|
||||
|
||||
cfg := daemon.configStore
|
||||
for _, host := range cfg.Hosts {
|
||||
// cfg.Hosts is normalized during startup, so should always have a scheme/proto
|
||||
proto, addr, _ := strings.Cut(host, "://")
|
||||
|
@ -224,8 +225,8 @@ func (daemon *Daemon) fillAPIInfo(v *types.Info) {
|
|||
}
|
||||
}
|
||||
|
||||
func (daemon *Daemon) fillDefaultAddressPools(v *types.Info) {
|
||||
for _, pool := range daemon.configStore.DefaultAddressPools.Value() {
|
||||
func (daemon *Daemon) fillDefaultAddressPools(v *types.Info, cfg *config.Config) {
|
||||
for _, pool := range cfg.DefaultAddressPools.Value() {
|
||||
v.DefaultAddressPools = append(v.DefaultAddressPools, types.NetworkAddressPool{
|
||||
Base: pool.Base,
|
||||
Size: pool.Size,
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
|
||||
"github.com/docker/docker/api/types"
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/pkg/rootless"
|
||||
"github.com/docker/docker/pkg/sysinfo"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -18,8 +19,8 @@ import (
|
|||
)
|
||||
|
||||
// fillPlatformInfo fills the platform related info.
|
||||
func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) {
|
||||
v.CgroupDriver = daemon.getCgroupDriver()
|
||||
func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo, cfg *config.Config) {
|
||||
v.CgroupDriver = cgroupDriver(cfg)
|
||||
v.CgroupVersion = "1"
|
||||
if sysInfo.CgroupUnified {
|
||||
v.CgroupVersion = "2"
|
||||
|
@ -37,13 +38,13 @@ func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo)
|
|||
v.CPUSet = sysInfo.Cpuset
|
||||
v.PidsLimit = sysInfo.PidsLimit
|
||||
}
|
||||
v.Runtimes = daemon.configStore.GetAllRuntimes()
|
||||
v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName()
|
||||
v.Runtimes = cfg.GetAllRuntimes()
|
||||
v.DefaultRuntime = cfg.DefaultRuntime
|
||||
v.RuncCommit.ID = "N/A"
|
||||
v.ContainerdCommit.ID = "N/A"
|
||||
v.InitCommit.ID = "N/A"
|
||||
|
||||
if rt := daemon.configStore.GetRuntime(v.DefaultRuntime); rt != nil {
|
||||
if rt := cfg.GetRuntime(v.DefaultRuntime); rt != nil {
|
||||
if rv, err := exec.Command(rt.Path, "--version").Output(); err == nil {
|
||||
if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil {
|
||||
logrus.Warnf("failed to parse %s version: %v", rt.Path, err)
|
||||
|
@ -61,8 +62,8 @@ func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo)
|
|||
logrus.Warnf("failed to retrieve containerd version: %v", err)
|
||||
}
|
||||
|
||||
v.InitBinary = daemon.configStore.GetInitPath()
|
||||
if initBinary, err := daemon.configStore.LookupInitPath(); err != nil {
|
||||
v.InitBinary = cfg.GetInitPath()
|
||||
if initBinary, err := cfg.LookupInitPath(); err != nil {
|
||||
logrus.Warnf("failed to find docker-init: %s", err)
|
||||
} else if rv, err := exec.Command(initBinary, "--version").Output(); err == nil {
|
||||
if _, commit, err := parseInitVersion(string(rv)); err != nil {
|
||||
|
@ -165,7 +166,7 @@ func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo)
|
|||
}
|
||||
}
|
||||
|
||||
func (daemon *Daemon) fillPlatformVersion(v *types.Version) {
|
||||
func (daemon *Daemon) fillPlatformVersion(v *types.Version, cfg *config.Config) {
|
||||
if rv, err := daemon.containerd.Version(context.Background()); err == nil {
|
||||
v.Components = append(v.Components, types.ComponentVersion{
|
||||
Name: "containerd",
|
||||
|
@ -176,8 +177,8 @@ func (daemon *Daemon) fillPlatformVersion(v *types.Version) {
|
|||
})
|
||||
}
|
||||
|
||||
defaultRuntime := daemon.configStore.GetDefaultRuntimeName()
|
||||
if rt := daemon.configStore.GetRuntime(defaultRuntime); rt != nil {
|
||||
defaultRuntime := cfg.DefaultRuntime
|
||||
if rt := cfg.GetRuntime(defaultRuntime); rt != nil {
|
||||
if rv, err := exec.Command(rt.Path, "--version").Output(); err == nil {
|
||||
if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil {
|
||||
logrus.Warnf("failed to parse %s version: %v", rt.Path, err)
|
||||
|
@ -195,7 +196,7 @@ func (daemon *Daemon) fillPlatformVersion(v *types.Version) {
|
|||
}
|
||||
}
|
||||
|
||||
if initBinary, err := daemon.configStore.LookupInitPath(); err != nil {
|
||||
if initBinary, err := cfg.LookupInitPath(); err != nil {
|
||||
logrus.Warnf("failed to find docker-init: %s", err)
|
||||
} else if rv, err := exec.Command(initBinary, "--version").Output(); err == nil {
|
||||
if ver, commit, err := parseInitVersion(string(rv)); err != nil {
|
||||
|
@ -337,15 +338,15 @@ func parseRuntimeVersion(v string) (runtime string, version string, commit strin
|
|||
return runtime, version, commit, err
|
||||
}
|
||||
|
||||
func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool {
|
||||
return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate()
|
||||
func cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo, cfg *config.Config) bool {
|
||||
return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(cfg.CgroupNamespaceMode).IsPrivate()
|
||||
}
|
||||
|
||||
// Rootless returns true if daemon is running in rootless mode
|
||||
func (daemon *Daemon) Rootless() bool {
|
||||
return daemon.configStore.Rootless
|
||||
func Rootless(cfg *config.Config) bool {
|
||||
return cfg.Rootless
|
||||
}
|
||||
|
||||
func (daemon *Daemon) noNewPrivileges() bool {
|
||||
return daemon.configStore.NoNewPrivileges
|
||||
func noNewPrivileges(cfg *config.Config) bool {
|
||||
return cfg.NoNewPrivileges
|
||||
}
|
||||
|
|
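Rootless, noNewPrivileges and cgroupNamespacesEnabled above stop being Daemon methods and become plain functions of the config snapshot. One practical side effect, sketched below as a hypothetical test that is not part of this commit, is that they can now be exercised without constructing a Daemon at all:

func TestNoNewPrivilegesFromSnapshot(t *testing.T) {
	// Pure functions of *config.Config need no Daemon fixture.
	if !noNewPrivileges(&config.Config{NoNewPrivileges: true}) {
		t.Fatal("expected no-new-privileges to be reported as enabled")
	}
	if noNewPrivileges(&config.Config{}) {
		t.Fatal("expected no-new-privileges to be reported as disabled")
	}
}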
|
@ -2,27 +2,28 @@ package daemon // import "github.com/docker/docker/daemon"
|
|||
|
||||
import (
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/pkg/sysinfo"
|
||||
)
|
||||
|
||||
// fillPlatformInfo fills the platform related info.
|
||||
func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) {
|
||||
func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo, cfg *config.Config) {
|
||||
}
|
||||
|
||||
func (daemon *Daemon) fillPlatformVersion(v *types.Version) {}
|
||||
func (daemon *Daemon) fillPlatformVersion(v *types.Version, cfg *config.Config) {}
|
||||
|
||||
func fillDriverWarnings(v *types.Info) {
|
||||
}
|
||||
|
||||
func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool {
|
||||
func cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo, cfg *config.Config) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Rootless returns true if daemon is running in rootless mode
|
||||
func (daemon *Daemon) Rootless() bool {
|
||||
func Rootless(*config.Config) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (daemon *Daemon) noNewPrivileges() bool {
|
||||
func noNewPrivileges(*config.Config) bool {
|
||||
return false
|
||||
}
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/api/types/versions/v1p20"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/daemon/network"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/go-connections/nat"
|
||||
|
@ -40,7 +41,7 @@ func (daemon *Daemon) ContainerInspectCurrent(ctx context.Context, name string,
|
|||
|
||||
ctr.Lock()
|
||||
|
||||
base, err := daemon.getInspectData(ctr)
|
||||
base, err := daemon.getInspectData(daemon.config(), ctr)
|
||||
if err != nil {
|
||||
ctr.Unlock()
|
||||
return nil, err
|
||||
|
@ -105,7 +106,7 @@ func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, er
|
|||
ctr.Lock()
|
||||
defer ctr.Unlock()
|
||||
|
||||
base, err := daemon.getInspectData(ctr)
|
||||
base, err := daemon.getInspectData(daemon.config(), ctr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -124,7 +125,7 @@ func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, er
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) getInspectData(container *container.Container) (*types.ContainerJSONBase, error) {
|
||||
func (daemon *Daemon) getInspectData(daemonCfg *config.Config, container *container.Container) (*types.ContainerJSONBase, error) {
|
||||
// make a copy to play with
|
||||
hostConfig := *container.HostConfig
|
||||
|
||||
|
@ -135,7 +136,7 @@ func (daemon *Daemon) getInspectData(container *container.Container) (*types.Con
|
|||
}
|
||||
|
||||
// We merge the Ulimits from hostConfig with daemon default
|
||||
daemon.mergeUlimits(&hostConfig)
|
||||
daemon.mergeUlimits(&hostConfig, daemonCfg)
|
||||
|
||||
var containerHealth *types.Health
|
||||
if container.State.Health != nil {
|
||||
|
|
|
@ -29,7 +29,7 @@ func (daemon *Daemon) containerInspectPre120(ctx context.Context, name string) (
|
|||
ctr.Lock()
|
||||
defer ctr.Unlock()
|
||||
|
||||
base, err := daemon.getInspectData(ctr)
|
||||
base, err := daemon.getInspectData(daemon.config(), ctr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -19,16 +19,18 @@ func TestGetInspectData(t *testing.T) {
|
|||
}
|
||||
|
||||
d := &Daemon{
|
||||
linkIndex: newLinkIndex(),
|
||||
configStore: &config.Config{},
|
||||
linkIndex: newLinkIndex(),
|
||||
}
|
||||
if d.UsesSnapshotter() {
|
||||
t.Skip("does not apply to containerd snapshotters, which don't have RWLayer set")
|
||||
}
|
||||
_, err := d.getInspectData(c)
|
||||
cfg := &config.Config{}
|
||||
d.configStore.Store(cfg)
|
||||
|
||||
_, err := d.getInspectData(cfg, c)
|
||||
assert.Check(t, is.ErrorContains(err, "RWLayer of container inspect-me is unexpectedly nil"))
|
||||
|
||||
c.Dead = true
|
||||
_, err = d.getInspectData(c)
|
||||
_, err = d.getInspectData(cfg, c)
|
||||
assert.Check(t, err)
|
||||
}
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
containertypes "github.com/docker/docker/api/types/container"
|
||||
timetypes "github.com/docker/docker/api/types/time"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/daemon/logger"
|
||||
logcache "github.com/docker/docker/daemon/logger/loggerutils/cache"
|
||||
"github.com/docker/docker/errdefs"
|
||||
|
@ -196,18 +197,14 @@ func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) err
|
|||
return logger.ValidateLogOpts(cfg.Type, cfg.Config)
|
||||
}
|
||||
|
||||
func (daemon *Daemon) setupDefaultLogConfig() error {
|
||||
config := daemon.configStore
|
||||
if len(config.LogConfig.Config) > 0 {
|
||||
if err := logger.ValidateLogOpts(config.LogConfig.Type, config.LogConfig.Config); err != nil {
|
||||
return errors.Wrap(err, "failed to set log opts")
|
||||
func defaultLogConfig(cfg *config.Config) (containertypes.LogConfig, error) {
|
||||
if len(cfg.LogConfig.Config) > 0 {
|
||||
if err := logger.ValidateLogOpts(cfg.LogConfig.Type, cfg.LogConfig.Config); err != nil {
|
||||
return containertypes.LogConfig{}, errors.Wrap(err, "failed to set log opts")
|
||||
}
|
||||
}
|
||||
daemon.defaultLogConfig = containertypes.LogConfig{
|
||||
Type: config.LogConfig.Type,
|
||||
Config: config.LogConfig.Config,
|
||||
}
|
||||
|
||||
logrus.Debugf("Using default logging driver %s", daemon.defaultLogConfig.Type)
|
||||
return nil
|
||||
return containertypes.LogConfig{
|
||||
Type: cfg.LogConfig.Type,
|
||||
Config: cfg.LogConfig.Config,
|
||||
}, nil
|
||||
}
|
||||
|
|
|
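setupDefaultLogConfig, which validated the options and then set daemon.defaultLogConfig as a side effect, becomes defaultLogConfig(cfg): a pure function of the snapshot that returns the computed value. The daemon constructor presumably consumes it along these lines (a sketch; the surrounding NewDaemon code is not part of this excerpt):

logCfg, err := defaultLogConfig(cfg)
if err != nil {
	return nil, errors.Wrap(err, "failed to set log opts")
}
d.defaultLogConfig = logCfg
logrus.Debugf("Using default logging driver %s", logCfg.Type)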
@ -9,6 +9,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
"github.com/docker/docker/pkg/plugins"
|
||||
"github.com/docker/docker/plugin"
|
||||
|
@ -19,8 +20,8 @@ import (
|
|||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func (daemon *Daemon) listenMetricsSock() (string, error) {
|
||||
path := filepath.Join(daemon.configStore.ExecRoot, "metrics.sock")
|
||||
func (daemon *Daemon) listenMetricsSock(cfg *config.Config) (string, error) {
|
||||
path := filepath.Join(cfg.ExecRoot, "metrics.sock")
|
||||
unix.Unlink(path)
|
||||
l, err := net.Listen("unix", path)
|
||||
if err != nil {
|
||||
|
|
|
@ -2,11 +2,14 @@
|
|||
|
||||
package daemon // import "github.com/docker/docker/daemon"
|
||||
|
||||
import "github.com/docker/docker/pkg/plugingetter"
|
||||
import (
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
)
|
||||
|
||||
func registerMetricsPluginCallback(getter plugingetter.PluginGetter, sockPath string) {
|
||||
}
|
||||
|
||||
func (daemon *Daemon) listenMetricsSock() (string, error) {
|
||||
func (daemon *Daemon) listenMetricsSock(*config.Config) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/errdefs"
|
||||
libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
|
||||
"github.com/docker/docker/restartmanager"
|
||||
|
@ -29,6 +30,8 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
|
|||
var exitStatus container.ExitStatus
|
||||
c.Lock()
|
||||
|
||||
cfg := daemon.config()
|
||||
|
||||
// Health checks will be automatically restarted if/when the
|
||||
// container is started again.
|
||||
daemon.stopHealthchecks(c)
|
||||
|
@ -99,7 +102,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
|
|||
} else {
|
||||
c.SetStopped(&exitStatus)
|
||||
if !c.HasBeenManuallyRestarted {
|
||||
defer daemon.autoRemove(c)
|
||||
defer daemon.autoRemove(cfg, c)
|
||||
}
|
||||
}
|
||||
defer c.Unlock() // needs to be called before autoRemove
|
||||
|
@ -117,7 +120,8 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
|
|||
// But containerStart will use daemon.netController segment.
|
||||
// So to avoid a panic during the startup process, we must wait here until the daemon restore is done.
|
||||
daemon.waitForStartupDone()
|
||||
if err = daemon.containerStart(context.Background(), c, "", "", false); err != nil {
|
||||
cfg := daemon.config() // Apply the most up-to-date daemon config to the restarted container.
|
||||
if err = daemon.containerStart(context.Background(), cfg, c, "", "", false); err != nil {
|
||||
logrus.Debugf("failed to restart container: %+v", err)
|
||||
}
|
||||
}
|
||||
|
@ -127,7 +131,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
|
|||
daemon.setStateCounter(c)
|
||||
c.CheckpointTo(daemon.containersReplica)
|
||||
c.Unlock()
|
||||
defer daemon.autoRemove(c)
|
||||
defer daemon.autoRemove(cfg, c)
|
||||
if err != restartmanager.ErrRestartCanceled {
|
||||
logrus.Errorf("restartmanger wait error: %+v", err)
|
||||
}
|
||||
|
@ -280,7 +284,7 @@ func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei
|
|||
return nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) autoRemove(c *container.Container) {
|
||||
func (daemon *Daemon) autoRemove(cfg *config.Config, c *container.Container) {
|
||||
c.Lock()
|
||||
ar := c.HostConfig.AutoRemove
|
||||
c.Unlock()
|
||||
|
@ -288,7 +292,7 @@ func (daemon *Daemon) autoRemove(c *container.Container) {
|
|||
return
|
||||
}
|
||||
|
||||
err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true})
|
||||
err := daemon.containerRm(cfg, c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true})
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
|
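Note the two different snapshots in handleContainerExit above: cfg is captured once near the top and reused for the deferred autoRemove, while the restart-manager path calls daemon.config() a second time so that a container restarted after a live reload starts under the newest settings. In miniature, mirroring the diff:

cfg := daemon.config() // one snapshot for this exit event; autoRemove reuses it
defer daemon.autoRemove(cfg, c)

// When the restart manager later decides to restart, take a fresh snapshot so
// the restarted container picks up any configuration reloaded in the meantime.
if err := daemon.containerStart(context.Background(), daemon.config(), c, "", "", false); err != nil {
	logrus.Debugf("failed to restart container: %+v", err)
}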
|
@ -15,6 +15,7 @@ import (
|
|||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/container"
|
||||
clustertypes "github.com/docker/docker/daemon/cluster/provider"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
internalnetwork "github.com/docker/docker/daemon/network"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/libnetwork"
|
||||
|
@ -160,7 +161,7 @@ func (daemon *Daemon) startIngressWorker() {
|
|||
select {
|
||||
case r := <-ingressJobsChannel:
|
||||
if r.create != nil {
|
||||
daemon.setupIngress(r.create, r.ip, ingressID)
|
||||
daemon.setupIngress(daemon.config(), r.create, r.ip, ingressID)
|
||||
ingressID = r.create.ID
|
||||
} else {
|
||||
daemon.releaseIngress(ingressID)
|
||||
|
@ -199,7 +200,7 @@ func (daemon *Daemon) ReleaseIngress() (<-chan struct{}, error) {
|
|||
return done, nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) setupIngress(create *clustertypes.NetworkCreateRequest, ip net.IP, staleID string) {
|
||||
func (daemon *Daemon) setupIngress(cfg *config.Config, create *clustertypes.NetworkCreateRequest, ip net.IP, staleID string) {
|
||||
controller := daemon.netController
|
||||
controller.AgentInitWait()
|
||||
|
||||
|
@ -207,7 +208,7 @@ func (daemon *Daemon) setupIngress(create *clustertypes.NetworkCreateRequest, ip
|
|||
daemon.releaseIngress(staleID)
|
||||
}
|
||||
|
||||
if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil {
|
||||
if _, err := daemon.createNetwork(cfg, create.NetworkCreateRequest, create.ID, true); err != nil {
|
||||
// If it is any error other than an "already exists"
// error, log the error and return.
|
||||
if _, ok := err.(libnetwork.NetworkNameError); !ok {
|
||||
|
@ -277,16 +278,16 @@ func (daemon *Daemon) WaitForDetachment(ctx context.Context, networkName, networ
|
|||
|
||||
// CreateManagedNetwork creates an agent network.
|
||||
func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error {
|
||||
_, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true)
|
||||
_, err := daemon.createNetwork(daemon.config(), create.NetworkCreateRequest, create.ID, true)
|
||||
return err
|
||||
}
|
||||
|
||||
// CreateNetwork creates a network with the given name, driver and other optional parameters
|
||||
func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) {
|
||||
return daemon.createNetwork(create, "", false)
|
||||
return daemon.createNetwork(daemon.config(), create, "", false)
|
||||
}
|
||||
|
||||
func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
|
||||
func (daemon *Daemon) createNetwork(cfg *config.Config, create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
|
||||
if runconfig.IsPreDefinedNetwork(create.Name) {
|
||||
return nil, PredefinedNetworkError(create.Name)
|
||||
}
|
||||
|
@ -319,7 +320,7 @@ func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string
|
|||
for k, v := range create.Options {
|
||||
networkOptions[k] = v
|
||||
}
|
||||
if defaultOpts, ok := daemon.configStore.DefaultNetworkOpts[driver]; create.ConfigFrom == nil && ok {
|
||||
if defaultOpts, ok := cfg.DefaultNetworkOpts[driver]; create.ConfigFrom == nil && ok {
|
||||
for k, v := range defaultOpts {
|
||||
if _, ok := networkOptions[k]; !ok {
|
||||
logrus.WithFields(logrus.Fields{"driver": driver, "network": id, k: v}).Debug("Applying network default option")
|
||||
|
|
|
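createNetwork now reads per-driver defaults from cfg.DefaultNetworkOpts and copies a default in only when the create request did not set that key (and no --config-from network is in play). A small sketch of that merge rule, using an illustrative bridge-driver option:

networkOptions := map[string]string{
	"com.docker.network.bridge.enable_icc": "false", // supplied by the create request
}
defaults := cfg.DefaultNetworkOpts["bridge"] // per-driver defaults from the daemon config

for k, v := range defaults {
	if _, ok := networkOptions[k]; !ok {
		networkOptions[k] = v // request-supplied options always win over defaults
	}
}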
@ -36,15 +36,15 @@ import (
|
|||
|
||||
const inContainerInitPath = "/sbin/" + dconfig.DefaultInitBinary
|
||||
|
||||
// WithRlimits sets the container's rlimits along with merging the daemon's rlimits
|
||||
func WithRlimits(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
||||
// withRlimits sets the container's rlimits along with merging the daemon's rlimits
|
||||
func withRlimits(daemon *Daemon, daemonCfg *dconfig.Config, c *container.Container) coci.SpecOpts {
|
||||
return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error {
|
||||
var rlimits []specs.POSIXRlimit
|
||||
|
||||
// We want to leave the original HostConfig alone so make a copy here
|
||||
hostConfig := *c.HostConfig
|
||||
// Merge with the daemon defaults
|
||||
daemon.mergeUlimits(&hostConfig)
|
||||
daemon.mergeUlimits(&hostConfig, daemonCfg)
|
||||
for _, ul := range hostConfig.Ulimits {
|
||||
rlimits = append(rlimits, specs.POSIXRlimit{
|
||||
Type: "RLIMIT_" + strings.ToUpper(ul.Name),
|
||||
|
@ -58,8 +58,8 @@ func WithRlimits(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
|||
}
|
||||
}
|
||||
|
||||
// WithLibnetwork sets the libnetwork hook
|
||||
func WithLibnetwork(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
||||
// withLibnetwork sets the libnetwork hook
|
||||
func withLibnetwork(daemon *Daemon, daemonCfg *dconfig.Config, c *container.Container) coci.SpecOpts {
|
||||
return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error {
|
||||
if s.Hooks == nil {
|
||||
s.Hooks = &specs.Hooks{}
|
||||
|
@ -72,7 +72,7 @@ func WithLibnetwork(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
|||
Path: target,
|
||||
Args: []string{
|
||||
"libnetwork-setkey",
|
||||
"-exec-root=" + daemon.configStore.GetExecRoot(),
|
||||
"-exec-root=" + daemonCfg.GetExecRoot(),
|
||||
c.ID,
|
||||
shortNetCtlrID,
|
||||
},
|
||||
|
@ -83,11 +83,11 @@ func WithLibnetwork(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
|||
}
|
||||
}
|
||||
|
||||
// WithRootless sets the spec to the rootless configuration
|
||||
func WithRootless(daemon *Daemon) coci.SpecOpts {
|
||||
// withRootless sets the spec to the rootless configuration
|
||||
func withRootless(daemon *Daemon, daemonCfg *dconfig.Config) coci.SpecOpts {
|
||||
return func(_ context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error {
|
||||
var v2Controllers []string
|
||||
if daemon.getCgroupDriver() == cgroupSystemdDriver {
|
||||
if cgroupDriver(daemonCfg) == cgroupSystemdDriver {
|
||||
if cdcgroups.Mode() != cdcgroups.Unified {
|
||||
return errors.New("rootless systemd driver doesn't support cgroup v1")
|
||||
}
|
||||
|
@ -488,8 +488,8 @@ func inSlice(slice []string, s string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// WithMounts sets the container's mounts
|
||||
func WithMounts(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
||||
// withMounts sets the container's mounts
|
||||
func withMounts(daemon *Daemon, daemonCfg *dconfig.Config, c *container.Container) coci.SpecOpts {
|
||||
return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) (err error) {
|
||||
if err := daemon.setupContainerMountsRoot(c); err != nil {
|
||||
return err
|
||||
|
@ -652,7 +652,7 @@ func WithMounts(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
|||
return errors.New("mount options conflict: ReadOnlyNonRecursive && ReadOnlyForceRecursive")
|
||||
}
|
||||
}
|
||||
if rroErr := daemon.supportsRecursivelyReadOnly(c.HostConfig.Runtime); rroErr != nil {
|
||||
if rroErr := supportsRecursivelyReadOnly(daemonCfg, c.HostConfig.Runtime); rroErr != nil {
|
||||
rro = false
|
||||
if m.ReadOnlyForceRecursive {
|
||||
return rroErr
|
||||
|
@ -673,7 +673,7 @@ func WithMounts(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
|||
// "mount" when we bind-mount. The reason for this is that at the point
|
||||
// when runc sets up the root filesystem, it is already inside a user
|
||||
// namespace, and thus cannot change any flags that are locked.
|
||||
if daemon.configStore.RemappedRoot != "" || userns.RunningInUserNS() {
|
||||
if daemonCfg.RemappedRoot != "" || userns.RunningInUserNS() {
|
||||
unprivOpts, err := getUnprivilegedMountFlags(m.Source)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -732,8 +732,8 @@ func sysctlExists(s string) bool {
|
|||
return err == nil
|
||||
}
|
||||
|
||||
// WithCommonOptions sets common docker options
|
||||
func WithCommonOptions(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
||||
// withCommonOptions sets common docker options
|
||||
func withCommonOptions(daemon *Daemon, daemonCfg *dconfig.Config, c *container.Container) coci.SpecOpts {
|
||||
return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error {
|
||||
if c.BaseFS == "" && !daemon.UsesSnapshotter() {
|
||||
return errors.New("populateCommonSpec: BaseFS of container " + c.ID + " is unexpectedly empty")
|
||||
|
@ -762,9 +762,9 @@ func WithCommonOptions(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
|||
// host namespace or another container's pid namespace where we already have an init
|
||||
if c.HostConfig.PidMode.IsPrivate() {
|
||||
if (c.HostConfig.Init != nil && *c.HostConfig.Init) ||
|
||||
(c.HostConfig.Init == nil && daemon.configStore.Init) {
|
||||
(c.HostConfig.Init == nil && daemonCfg.Init) {
|
||||
s.Process.Args = append([]string{inContainerInitPath, "--", c.Path}, c.Args...)
|
||||
path, err := daemon.configStore.LookupInitPath() // this will fall back to DefaultInitBinary and return an absolute path
|
||||
path, err := daemonCfg.LookupInitPath() // this will fall back to DefaultInitBinary and return an absolute path
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -790,7 +790,7 @@ func WithCommonOptions(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
|||
// joining an existing namespace, only if we create a new net namespace.
|
||||
if c.HostConfig.NetworkMode.IsPrivate() {
|
||||
// We cannot set up ping socket support in a user namespace
|
||||
userNS := daemon.configStore.RemappedRoot != "" && c.HostConfig.UsernsMode.IsPrivate()
|
||||
userNS := daemonCfg.RemappedRoot != "" && c.HostConfig.UsernsMode.IsPrivate()
|
||||
if !userNS && !userns.RunningInUserNS() && sysctlExists("net.ipv4.ping_group_range") {
|
||||
// allow unprivileged ICMP echo sockets without CAP_NET_RAW
|
||||
s.Linux.Sysctl["net.ipv4.ping_group_range"] = "0 2147483647"
|
||||
|
@ -805,24 +805,24 @@ func WithCommonOptions(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
|||
}
|
||||
}
|
||||
|
||||
// WithCgroups sets the container's cgroups
|
||||
func WithCgroups(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
||||
// withCgroups sets the container's cgroups
|
||||
func withCgroups(daemon *Daemon, daemonCfg *dconfig.Config, c *container.Container) coci.SpecOpts {
|
||||
return func(ctx context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error {
|
||||
var cgroupsPath string
|
||||
scopePrefix := "docker"
|
||||
parent := "/docker"
|
||||
useSystemd := UsingSystemd(daemon.configStore)
|
||||
useSystemd := UsingSystemd(daemonCfg)
|
||||
if useSystemd {
|
||||
parent = "system.slice"
|
||||
if daemon.configStore.Rootless {
|
||||
if daemonCfg.Rootless {
|
||||
parent = "user.slice"
|
||||
}
|
||||
}
|
||||
|
||||
if c.HostConfig.CgroupParent != "" {
|
||||
parent = c.HostConfig.CgroupParent
|
||||
} else if daemon.configStore.CgroupParent != "" {
|
||||
parent = daemon.configStore.CgroupParent
|
||||
} else if daemonCfg.CgroupParent != "" {
|
||||
parent = daemonCfg.CgroupParent
|
||||
}
|
||||
|
||||
if useSystemd {
|
||||
|
@ -835,7 +835,7 @@ func WithCgroups(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
|||
|
||||
// the rest is only needed for CPU RT controller
|
||||
|
||||
if daemon.configStore.CPURealtimePeriod == 0 && daemon.configStore.CPURealtimeRuntime == 0 {
|
||||
if daemonCfg.CPURealtimePeriod == 0 && daemonCfg.CPURealtimeRuntime == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -869,7 +869,7 @@ func WithCgroups(daemon *Daemon, c *container.Container) coci.SpecOpts {
|
|||
}
|
||||
mnt = filepath.Join(mnt, root)
|
||||
|
||||
if err := daemon.initCPURtController(mnt, parentPath); err != nil {
|
||||
if err := daemon.initCPURtController(daemonCfg, mnt, parentPath); err != nil {
|
||||
return errors.Wrap(err, "unable to init CPU RT controller")
|
||||
}
|
||||
return nil
|
||||
|
@ -1019,23 +1019,23 @@ func WithUser(c *container.Container) coci.SpecOpts {
|
|||
}
|
||||
}
|
||||
|
||||
func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (retSpec *specs.Spec, err error) {
|
||||
func (daemon *Daemon) createSpec(ctx context.Context, daemonCfg *dconfig.Config, c *container.Container) (retSpec *specs.Spec, err error) {
|
||||
var (
|
||||
opts []coci.SpecOpts
|
||||
s = oci.DefaultSpec()
|
||||
)
|
||||
opts = append(opts,
|
||||
WithCommonOptions(daemon, c),
|
||||
WithCgroups(daemon, c),
|
||||
withCommonOptions(daemon, daemonCfg, c),
|
||||
withCgroups(daemon, daemonCfg, c),
|
||||
WithResources(c),
|
||||
WithSysctls(c),
|
||||
WithDevices(daemon, c),
|
||||
WithRlimits(daemon, c),
|
||||
withRlimits(daemon, daemonCfg, c),
|
||||
WithNamespaces(daemon, c),
|
||||
WithCapabilities(c),
|
||||
WithSeccomp(daemon, c),
|
||||
WithMounts(daemon, c),
|
||||
WithLibnetwork(daemon, c),
|
||||
withMounts(daemon, daemonCfg, c),
|
||||
withLibnetwork(daemon, daemonCfg, c),
|
||||
WithApparmor(c),
|
||||
WithSelinux(c),
|
||||
WithOOMScore(&c.HostConfig.OomScoreAdj),
|
||||
|
@ -1068,8 +1068,8 @@ func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (r
|
|||
if c.HostConfig.ReadonlyPaths != nil {
|
||||
opts = append(opts, coci.WithReadonlyPaths(c.HostConfig.ReadonlyPaths))
|
||||
}
|
||||
if daemon.configStore.Rootless {
|
||||
opts = append(opts, WithRootless(daemon))
|
||||
if daemonCfg.Rootless {
|
||||
opts = append(opts, withRootless(daemon, daemonCfg))
|
||||
}
|
||||
|
||||
var snapshotter, snapshotKey string
|
||||
|
@ -1096,14 +1096,14 @@ func clearReadOnly(m *specs.Mount) {
|
|||
}
|
||||
|
||||
// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig
|
||||
func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) {
|
||||
func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig, daemonCfg *dconfig.Config) {
|
||||
ulimits := c.Ulimits
|
||||
// Merge ulimits with daemon defaults
|
||||
ulIdx := make(map[string]struct{})
|
||||
for _, ul := range ulimits {
|
||||
ulIdx[ul.Name] = struct{}{}
|
||||
}
|
||||
for name, ul := range daemon.configStore.Ulimits {
|
||||
for name, ul := range daemonCfg.Ulimits {
|
||||
if _, exists := ulIdx[name]; !exists {
|
||||
ulimits = append(ulimits, ul)
|
||||
}
|
||||
|
|
|
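mergeUlimits keeps every ulimit the container sets explicitly and appends a daemon default only for names the container did not mention. For example, with illustrative values (units here is github.com/docker/go-units):

// The container asks for nofile only; the daemon default for nproc is merged
// in, while the daemon's nofile default loses to the container's own setting.
hostConfig.Ulimits = []*units.Ulimit{{Name: "nofile", Soft: 1024, Hard: 2048}}
daemonCfg.Ulimits = map[string]*units.Ulimit{
	"nofile": {Name: "nofile", Soft: 65536, Hard: 65536},
	"nproc":  {Name: "nproc", Soft: 4096, Hard: 8192},
}
daemon.mergeUlimits(&hostConfig, daemonCfg)
// hostConfig.Ulimits now holds nofile(1024/2048) plus nproc(4096/8192).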
@ -30,7 +30,6 @@ func setupFakeDaemon(t *testing.T, c *container.Container) *Daemon {
|
|||
d := &Daemon{
|
||||
// some empty structs to avoid getting a panic
|
||||
// caused by a null pointer dereference
|
||||
configStore: &config.Config{},
|
||||
linkIndex: newLinkIndex(),
|
||||
netController: netController,
|
||||
imageService: &fakeImageService{},
|
||||
|
@ -83,7 +82,7 @@ func TestTmpfsDevShmNoDupMount(t *testing.T) {
|
|||
d := setupFakeDaemon(t, c)
|
||||
defer cleanupFakeContainer(c)
|
||||
|
||||
_, err := d.createSpec(context.TODO(), c)
|
||||
_, err := d.createSpec(context.TODO(), &config.Config{}, c)
|
||||
assert.Check(t, err)
|
||||
}
|
||||
|
||||
|
@ -102,7 +101,7 @@ func TestIpcPrivateVsReadonly(t *testing.T) {
|
|||
d := setupFakeDaemon(t, c)
|
||||
defer cleanupFakeContainer(c)
|
||||
|
||||
s, err := d.createSpec(context.TODO(), c)
|
||||
s, err := d.createSpec(context.TODO(), &config.Config{}, c)
|
||||
assert.Check(t, err)
|
||||
|
||||
// Find the /dev/shm mount in ms, check it does not have ro
|
||||
|
@ -132,7 +131,7 @@ func TestSysctlOverride(t *testing.T) {
|
|||
defer cleanupFakeContainer(c)
|
||||
|
||||
// Ensure that the implicit sysctl is set correctly.
|
||||
s, err := d.createSpec(context.TODO(), c)
|
||||
s, err := d.createSpec(context.TODO(), &config.Config{}, c)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, s.Hostname, "foobar")
|
||||
assert.Equal(t, s.Linux.Sysctl["kernel.domainname"], c.Config.Domainname)
|
||||
|
@ -148,15 +147,14 @@ func TestSysctlOverride(t *testing.T) {
|
|||
assert.Assert(t, c.HostConfig.Sysctls["kernel.domainname"] != c.Config.Domainname)
|
||||
c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"] = "1024"
|
||||
|
||||
s, err = d.createSpec(context.TODO(), c)
|
||||
s, err = d.createSpec(context.TODO(), &config.Config{}, c)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, s.Hostname, "foobar")
|
||||
assert.Equal(t, s.Linux.Sysctl["kernel.domainname"], c.HostConfig.Sysctls["kernel.domainname"])
|
||||
assert.Equal(t, s.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"], c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"])
|
||||
|
||||
// Ensure the ping_group_range is not set on a daemon with user-namespaces enabled
|
||||
d.configStore.RemappedRoot = "dummy:dummy"
|
||||
s, err = d.createSpec(context.TODO(), c)
|
||||
s, err = d.createSpec(context.TODO(), &config.Config{RemappedRoot: "dummy:dummy"}, c)
|
||||
assert.NilError(t, err)
|
||||
_, ok := s.Linux.Sysctl["net.ipv4.ping_group_range"]
|
||||
assert.Assert(t, !ok)
|
||||
|
@ -164,7 +162,7 @@ func TestSysctlOverride(t *testing.T) {
|
|||
// Ensure the ping_group_range is set on a container in "host" userns mode
|
||||
// on a daemon with user-namespaces enabled
|
||||
c.HostConfig.UsernsMode = "host"
|
||||
s, err = d.createSpec(context.TODO(), c)
|
||||
s, err = d.createSpec(context.TODO(), &config.Config{RemappedRoot: "dummy:dummy"}, c)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, s.Linux.Sysctl["net.ipv4.ping_group_range"], "0 2147483647")
|
||||
}
|
||||
|
@ -184,7 +182,7 @@ func TestSysctlOverrideHost(t *testing.T) {
|
|||
defer cleanupFakeContainer(c)
|
||||
|
||||
// Ensure that the implicit sysctl is not set
|
||||
s, err := d.createSpec(context.TODO(), c)
|
||||
s, err := d.createSpec(context.TODO(), &config.Config{}, c)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, s.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"], "")
|
||||
assert.Equal(t, s.Linux.Sysctl["net.ipv4.ping_group_range"], "")
|
||||
|
@ -192,7 +190,7 @@ func TestSysctlOverrideHost(t *testing.T) {
|
|||
// Set an explicit sysctl.
|
||||
c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"] = "1024"
|
||||
|
||||
s, err = d.createSpec(context.TODO(), c)
|
||||
s, err = d.createSpec(context.TODO(), &config.Config{}, c)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, s.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"], c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"])
|
||||
}
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
containertypes "github.com/docker/docker/api/types/container"
|
||||
imagetypes "github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/oci"
|
||||
"github.com/docker/docker/pkg/sysinfo"
|
||||
|
@ -27,7 +28,7 @@ const (
|
|||
credentialSpecFileLocation = "CredentialSpecs"
|
||||
)
|
||||
|
||||
func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (*specs.Spec, error) {
|
||||
func (daemon *Daemon) createSpec(ctx context.Context, daemonCfg *config.Config, c *container.Container) (*specs.Spec, error) {
|
||||
img, err := daemon.imageService.GetImage(ctx, string(c.ImageID), imagetypes.GetImageOpts{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -142,7 +143,7 @@ func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (*
|
|||
return nil, errors.Wrapf(err, "container %s", c.ID)
|
||||
}
|
||||
|
||||
dnsSearch := daemon.getDNSSearchSettings(c)
|
||||
dnsSearch := daemon.getDNSSearchSettings(daemonCfg, c)
|
||||
|
||||
// Get endpoints for the libnetwork allocated networks to the container
|
||||
var epList []string
|
||||
|
@ -404,7 +405,7 @@ func setResourcesInSpec(c *container.Container, s *specs.Spec, isHyperV bool) {
|
|||
|
||||
// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig
|
||||
// It will do nothing on non-Linux platform
|
||||
func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) {
|
||||
func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig, daemonCfg *config.Config) {
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
@ -56,6 +56,7 @@ func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters.
|
|||
return nil, err
|
||||
}
|
||||
|
||||
cfg := daemon.config()
|
||||
allContainers := daemon.List()
|
||||
for _, c := range allContainers {
|
||||
select {
|
||||
|
@ -77,7 +78,7 @@ func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters.
|
|||
return nil, err
|
||||
}
|
||||
// TODO: sets RmLink to true?
|
||||
err = daemon.ContainerRm(c.ID, &types.ContainerRmConfig{})
|
||||
err = daemon.containerRm(cfg, c.ID, &types.ContainerRmConfig{})
|
||||
if err != nil {
|
||||
logrus.Warnf("failed to prune container %s: %v", c.ID, err)
|
||||
continue
|
||||
|
|
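ContainersPrune above captures cfg once, before the loop, and hands the same snapshot to every containerRm call; that is exactly the discipline the commit message asks for, since a reload landing halfway through a prune can then no longer remove some containers under the old configuration and the rest under the new one. The anti-pattern it avoids would look like this:

for _, c := range allContainers {
	// Wrong under read-copy-update: each iteration could observe a different
	// snapshot if a reload happens while the prune is still running.
	if err := daemon.containerRm(daemon.config(), c.ID, &types.ContainerRmConfig{}); err != nil {
		logrus.Warnf("failed to prune container %s: %v", c.ID, err)
	}
}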
daemon/reload.go (270 changed lines)
@@ -5,10 +5,55 @@ import (
	"fmt"
	"strconv"

	"github.com/docker/docker/daemon/config"
	"github.com/hashicorp/go-multierror"
	"github.com/mitchellh/copystructure"
	"github.com/sirupsen/logrus"

	"github.com/docker/docker/daemon/config"
)

// reloadTxn is used to defer side effects of a config reload.
type reloadTxn struct {
	onCommit, onRollback []func() error
}

// OnCommit defers a function to be called when a config reload is being finalized.
// The error returned from cb is purely informational.
func (tx *reloadTxn) OnCommit(cb func() error) {
	tx.onCommit = append(tx.onCommit, cb)
}

// OnRollback defers a function to be called when a config reload is aborted.
// The error returned from cb is purely informational.
func (tx *reloadTxn) OnRollback(cb func() error) {
	tx.onRollback = append(tx.onRollback, cb)
}

func (tx *reloadTxn) run(cbs []func() error) error {
	tx.onCommit = nil
	tx.onRollback = nil

	var res *multierror.Error
	for _, cb := range cbs {
		res = multierror.Append(res, cb())
	}
	return res.ErrorOrNil()
}

// Commit calls all functions registered with OnCommit.
// Any errors returned by the functions are collated into a
// *github.com/hashicorp/go-multierror.Error value.
func (tx *reloadTxn) Commit() error {
	return tx.run(tx.onCommit)
}

// Rollback calls all functions registered with OnRollback.
// Any errors returned by the functions are collated into a
// *github.com/hashicorp/go-multierror.Error value.
func (tx *reloadTxn) Rollback() error {
	return tx.run(tx.onRollback)
}
|
||||
|
||||
// Reload modifies the live daemon configuration from conf.
|
||||
// conf is assumed to be a validated configuration.
|
||||
//
|
||||
|
@@ -24,66 +69,63 @@ import (
// - Insecure registries
// - Registry mirrors
// - Daemon live restore
func (daemon *Daemon) Reload(conf *config.Config) (err error) {
daemon.configStore.Lock()
attributes := map[string]string{}
func (daemon *Daemon) Reload(conf *config.Config) error {
daemon.configReload.Lock()
defer daemon.configReload.Unlock()
copied, err := copystructure.Copy(daemon.config())
if err != nil {
return err
}
newCfg := copied.(*config.Config)

defer func() {
if err == nil {
jsonString, _ := json.Marshal(&struct {
*config.Config
config.Proxies `json:"proxies"`
}{
Config: daemon.configStore,
Proxies: config.Proxies{
HTTPProxy: config.MaskCredentials(daemon.configStore.HTTPProxy),
HTTPSProxy: config.MaskCredentials(daemon.configStore.HTTPSProxy),
NoProxy: config.MaskCredentials(daemon.configStore.NoProxy),
},
})
logrus.Infof("Reloaded configuration: %s", jsonString)
}
daemon.configStore.Unlock()
if err == nil {
daemon.LogDaemonEventWithAttributes("reload", attributes)
}
}()
attributes := map[string]string{}

// Ideally reloading should be transactional: the reload either completes
// successfully, or the daemon config and state are left untouched. We use a
// simplified two-phase commit protocol to achieve this. Any fallible reload
// operation is split into two phases. The first phase performs all the fallible
// operations without mutating daemon state and returns a closure: its second
// phase. The second phase applies the changes to the daemon state. If any
// first-phase returns an error, the reload transaction is "rolled back" by
// discarding the second-phase closures.
// two-phase commit protocol to achieve this. Any fallible reload operation is
// split into two phases. The first phase performs all the fallible operations
// and mutates the newCfg copy. The second phase atomically swaps newCfg into
// the live daemon configuration and executes any commit functions the first
// phase registered to apply the side effects. If any first-phase returns an
// error, the reload transaction is rolled back by discarding newCfg and
// executing any registered rollback functions.

type TxnCommitter = func(attributes map[string]string)
var txns []TxnCommitter
for _, prepare := range []func(*config.Config) (TxnCommitter, error){
var txn reloadTxn
for _, reload := range []func(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error{
daemon.reloadPlatform,
daemon.reloadDebug,
daemon.reloadMaxConcurrentDownloadsAndUploads,
daemon.reloadMaxDownloadAttempts,
daemon.reloadShutdownTimeout,
daemon.reloadFeatures,
daemon.reloadLabels,
daemon.reloadRegistryConfig,
daemon.reloadLiveRestore,
daemon.reloadNetworkDiagnosticPort,
} {
commit, err := prepare(conf)
if err != nil {
if err := reload(&txn, newCfg, conf, attributes); err != nil {
if rollbackErr := txn.Rollback(); rollbackErr != nil {
return multierror.Append(nil, err, rollbackErr)
}
return err
}
txns = append(txns, commit)
}

daemon.reloadDebug(conf, attributes)
daemon.reloadMaxConcurrentDownloadsAndUploads(conf, attributes)
daemon.reloadMaxDownloadAttempts(conf, attributes)
daemon.reloadShutdownTimeout(conf, attributes)
daemon.reloadFeatures(conf, attributes)
daemon.reloadLabels(conf, attributes)
daemon.reloadLiveRestore(conf, attributes)
daemon.reloadNetworkDiagnosticPort(conf, attributes)

for _, tx := range txns {
tx(attributes)
}
return nil
jsonString, _ := json.Marshal(&struct {
*config.Config
config.Proxies `json:"proxies"`
}{
Config: newCfg,
Proxies: config.Proxies{
HTTPProxy: config.MaskCredentials(newCfg.HTTPProxy),
HTTPSProxy: config.MaskCredentials(newCfg.HTTPSProxy),
NoProxy: config.MaskCredentials(newCfg.NoProxy),
},
})
logrus.Infof("Reloaded configuration: %s", jsonString)
daemon.configStore.Store(newCfg)
daemon.LogDaemonEventWithAttributes("reload", attributes)
return txn.Commit()
}

func marshalAttributeSlice(v []string) string {
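To make the two-phase flow described in the new comment concrete, here is a hedged, standalone sketch: a reload step mutates the `newCfg` copy and registers its side effect with `OnCommit`; the driver rolls back on the first error and otherwise publishes the new config and commits. The `reloadTxn` methods mirror the type added earlier in this file; `reloadDebugStep` is a hypothetical stand-in, and error collation is simplified to the first error rather than go-multierror.

```go
// Standalone sketch of the reloadTxn two-phase commit flow described above.
package main

import "fmt"

type Config struct{ Debug bool }

type reloadTxn struct{ onCommit, onRollback []func() error }

func (tx *reloadTxn) OnCommit(cb func() error)   { tx.onCommit = append(tx.onCommit, cb) }
func (tx *reloadTxn) OnRollback(cb func() error) { tx.onRollback = append(tx.onRollback, cb) }

// run is simplified: it keeps only the first error instead of collating all
// of them with go-multierror as the daemon code does.
func run(cbs []func() error) error {
	var first error
	for _, cb := range cbs {
		if err := cb(); err != nil && first == nil {
			first = err
		}
	}
	return first
}

func (tx *reloadTxn) Commit() error   { return run(tx.onCommit) }
func (tx *reloadTxn) Rollback() error { return run(tx.onRollback) }

// reloadDebugStep is a hypothetical reload step: phase one mutates the copy,
// and the side effect (here, recording an attribute) is deferred to commit.
func reloadDebugStep(txn *reloadTxn, newCfg, conf *Config, attributes map[string]string) error {
	newCfg.Debug = conf.Debug
	txn.OnCommit(func() error {
		attributes["debug"] = fmt.Sprint(newCfg.Debug)
		return nil
	})
	return nil
}

func main() {
	newCfg, conf := &Config{}, &Config{Debug: true}
	attributes := map[string]string{}

	var txn reloadTxn
	for _, reload := range []func(*reloadTxn, *Config, *Config, map[string]string) error{reloadDebugStep} {
		if err := reload(&txn, newCfg, conf, attributes); err != nil {
			_ = txn.Rollback() // discard newCfg and undo any registered side effects
			return
		}
	}
	// The daemon would atomically publish newCfg here, then run the commits.
	if err := txn.Commit(); err != nil {
		fmt.Println("commit warnings:", err)
	}
	fmt.Println(attributes)
}
```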
@ -99,145 +141,155 @@ func marshalAttributeSlice(v []string) string {
|
|||
|
||||
// reloadDebug updates configuration with Debug option
|
||||
// and updates the passed attributes
|
||||
func (daemon *Daemon) reloadDebug(conf *config.Config, attributes map[string]string) {
|
||||
func (daemon *Daemon) reloadDebug(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
|
||||
// update corresponding configuration
|
||||
if conf.IsValueSet("debug") {
|
||||
daemon.configStore.Debug = conf.Debug
|
||||
newCfg.Debug = conf.Debug
|
||||
}
|
||||
// prepare reload event attributes with updatable configurations
|
||||
attributes["debug"] = strconv.FormatBool(daemon.configStore.Debug)
|
||||
attributes["debug"] = strconv.FormatBool(newCfg.Debug)
|
||||
return nil
|
||||
}
|
||||
|
||||
// reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent
|
||||
// download and upload options and updates the passed attributes
|
||||
func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config, attributes map[string]string) {
|
||||
func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
|
||||
// We always "reset" as the cost is lightweight and easy to maintain.
|
||||
daemon.configStore.MaxConcurrentDownloads = config.DefaultMaxConcurrentDownloads
|
||||
daemon.configStore.MaxConcurrentUploads = config.DefaultMaxConcurrentUploads
|
||||
newCfg.MaxConcurrentDownloads = config.DefaultMaxConcurrentDownloads
|
||||
newCfg.MaxConcurrentUploads = config.DefaultMaxConcurrentUploads
|
||||
|
||||
if conf.IsValueSet("max-concurrent-downloads") && conf.MaxConcurrentDownloads != 0 {
|
||||
daemon.configStore.MaxConcurrentDownloads = conf.MaxConcurrentDownloads
|
||||
newCfg.MaxConcurrentDownloads = conf.MaxConcurrentDownloads
|
||||
}
|
||||
if conf.IsValueSet("max-concurrent-uploads") && conf.MaxConcurrentUploads != 0 {
|
||||
daemon.configStore.MaxConcurrentUploads = conf.MaxConcurrentUploads
|
||||
}
|
||||
if daemon.imageService != nil {
|
||||
daemon.imageService.UpdateConfig(
|
||||
daemon.configStore.MaxConcurrentDownloads,
|
||||
daemon.configStore.MaxConcurrentUploads,
|
||||
)
|
||||
newCfg.MaxConcurrentUploads = conf.MaxConcurrentUploads
|
||||
}
|
||||
txn.OnCommit(func() error {
|
||||
if daemon.imageService != nil {
|
||||
daemon.imageService.UpdateConfig(
|
||||
newCfg.MaxConcurrentDownloads,
|
||||
newCfg.MaxConcurrentUploads,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// prepare reload event attributes with updatable configurations
|
||||
attributes["max-concurrent-downloads"] = strconv.Itoa(daemon.configStore.MaxConcurrentDownloads)
|
||||
attributes["max-concurrent-uploads"] = strconv.Itoa(daemon.configStore.MaxConcurrentUploads)
|
||||
attributes["max-concurrent-downloads"] = strconv.Itoa(newCfg.MaxConcurrentDownloads)
|
||||
attributes["max-concurrent-uploads"] = strconv.Itoa(newCfg.MaxConcurrentUploads)
|
||||
logrus.Debug("Reset Max Concurrent Downloads: ", attributes["max-concurrent-downloads"])
|
||||
logrus.Debug("Reset Max Concurrent Uploads: ", attributes["max-concurrent-uploads"])
|
||||
return nil
|
||||
}
|
||||
|
||||
// reloadMaxDownloadAttempts updates configuration with max concurrent
|
||||
// download attempts when a connection is lost and updates the passed attributes
|
||||
func (daemon *Daemon) reloadMaxDownloadAttempts(conf *config.Config, attributes map[string]string) {
|
||||
func (daemon *Daemon) reloadMaxDownloadAttempts(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
|
||||
// We always "reset" as the cost is lightweight and easy to maintain.
|
||||
daemon.configStore.MaxDownloadAttempts = config.DefaultDownloadAttempts
|
||||
newCfg.MaxDownloadAttempts = config.DefaultDownloadAttempts
|
||||
if conf.IsValueSet("max-download-attempts") && conf.MaxDownloadAttempts != 0 {
|
||||
daemon.configStore.MaxDownloadAttempts = conf.MaxDownloadAttempts
|
||||
newCfg.MaxDownloadAttempts = conf.MaxDownloadAttempts
|
||||
}
|
||||
|
||||
// prepare reload event attributes with updatable configurations
|
||||
attributes["max-download-attempts"] = strconv.Itoa(daemon.configStore.MaxDownloadAttempts)
|
||||
attributes["max-download-attempts"] = strconv.Itoa(newCfg.MaxDownloadAttempts)
|
||||
logrus.Debug("Reset Max Download Attempts: ", attributes["max-download-attempts"])
|
||||
return nil
|
||||
}
|
||||
|
||||
// reloadShutdownTimeout updates configuration with daemon shutdown timeout option
|
||||
// and updates the passed attributes
|
||||
func (daemon *Daemon) reloadShutdownTimeout(conf *config.Config, attributes map[string]string) {
|
||||
func (daemon *Daemon) reloadShutdownTimeout(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
|
||||
// update corresponding configuration
|
||||
if conf.IsValueSet("shutdown-timeout") {
|
||||
daemon.configStore.ShutdownTimeout = conf.ShutdownTimeout
|
||||
logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout)
|
||||
newCfg.ShutdownTimeout = conf.ShutdownTimeout
|
||||
logrus.Debugf("Reset Shutdown Timeout: %d", newCfg.ShutdownTimeout)
|
||||
}
|
||||
|
||||
// prepare reload event attributes with updatable configurations
|
||||
attributes["shutdown-timeout"] = strconv.Itoa(daemon.configStore.ShutdownTimeout)
|
||||
attributes["shutdown-timeout"] = strconv.Itoa(newCfg.ShutdownTimeout)
|
||||
return nil
|
||||
}
|
||||
|
||||
// reloadLabels updates configuration with engine labels
|
||||
// and updates the passed attributes
|
||||
func (daemon *Daemon) reloadLabels(conf *config.Config, attributes map[string]string) {
|
||||
func (daemon *Daemon) reloadLabels(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
|
||||
// update corresponding configuration
|
||||
if conf.IsValueSet("labels") {
|
||||
daemon.configStore.Labels = conf.Labels
|
||||
newCfg.Labels = conf.Labels
|
||||
}
|
||||
|
||||
// prepare reload event attributes with updatable configurations
|
||||
attributes["labels"] = marshalAttributeSlice(daemon.configStore.Labels)
|
||||
attributes["labels"] = marshalAttributeSlice(newCfg.Labels)
|
||||
return nil
|
||||
}
|
||||
|
||||
// reloadRegistryConfig updates the configuration with registry options
|
||||
// and updates the passed attributes.
|
||||
func (daemon *Daemon) reloadRegistryConfig(conf *config.Config) (func(map[string]string), error) {
|
||||
func (daemon *Daemon) reloadRegistryConfig(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
|
||||
// Update corresponding configuration.
|
||||
opts := daemon.configStore.ServiceOptions
|
||||
|
||||
if conf.IsValueSet("allow-nondistributable-artifacts") {
|
||||
opts.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts
|
||||
newCfg.ServiceOptions.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts
|
||||
}
|
||||
if conf.IsValueSet("insecure-registries") {
|
||||
opts.InsecureRegistries = conf.InsecureRegistries
|
||||
newCfg.ServiceOptions.InsecureRegistries = conf.InsecureRegistries
|
||||
}
|
||||
if conf.IsValueSet("registry-mirrors") {
|
||||
opts.Mirrors = conf.Mirrors
|
||||
newCfg.ServiceOptions.Mirrors = conf.Mirrors
|
||||
}
|
||||
|
||||
commit, err := daemon.registryService.ReplaceConfig(opts)
|
||||
commit, err := daemon.registryService.ReplaceConfig(newCfg.ServiceOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
txn.OnCommit(func() error { commit(); return nil })
|
||||
|
||||
return func(attributes map[string]string) {
|
||||
commit()
|
||||
daemon.configStore.ServiceOptions = opts
|
||||
// Prepare reload event attributes with updatable configurations.
|
||||
attributes["allow-nondistributable-artifacts"] = marshalAttributeSlice(daemon.configStore.AllowNondistributableArtifacts)
|
||||
attributes["insecure-registries"] = marshalAttributeSlice(daemon.configStore.InsecureRegistries)
|
||||
attributes["registry-mirrors"] = marshalAttributeSlice(daemon.configStore.Mirrors)
|
||||
}, nil
|
||||
attributes["allow-nondistributable-artifacts"] = marshalAttributeSlice(newCfg.ServiceOptions.AllowNondistributableArtifacts)
|
||||
attributes["insecure-registries"] = marshalAttributeSlice(newCfg.ServiceOptions.InsecureRegistries)
|
||||
attributes["registry-mirrors"] = marshalAttributeSlice(newCfg.ServiceOptions.Mirrors)
|
||||
|
||||
return nil
|
||||
}
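`reloadRegistryConfig` above adapts a prepare/commit style API to the transaction: the fallible `ReplaceConfig` call runs in phase one, and only the returned commit closure is deferred. A small sketch of that idiom follows; `replaceConfig` is a hypothetical stand-in for a call such as `registryService.ReplaceConfig`.

```go
// Sketch of wrapping a prepare/commit style API in the reload transaction.
package main

import "fmt"

type reloadTxn struct{ onCommit []func() error }

func (tx *reloadTxn) OnCommit(cb func() error) { tx.onCommit = append(tx.onCommit, cb) }

// replaceConfig validates the new options up front and returns a closure
// that applies them; nothing is mutated until that closure runs.
func replaceConfig(mirrors []string) (commit func(), err error) {
	if len(mirrors) == 0 {
		return nil, fmt.Errorf("no mirrors configured")
	}
	return func() { fmt.Println("applied mirrors:", mirrors) }, nil
}

func main() {
	var txn reloadTxn

	commit, err := replaceConfig([]string{"https://mirror.example.com"})
	if err != nil {
		return // the caller would roll back the whole reload here
	}
	txn.OnCommit(func() error { commit(); return nil })

	// ...once every step succeeds, the commit phase runs:
	for _, cb := range txn.onCommit {
		_ = cb()
	}
}
```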
|
||||
|
||||
// reloadLiveRestore updates configuration with live restore option
|
||||
// and updates the passed attributes
|
||||
func (daemon *Daemon) reloadLiveRestore(conf *config.Config, attributes map[string]string) {
|
||||
func (daemon *Daemon) reloadLiveRestore(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
|
||||
// update corresponding configuration
|
||||
if conf.IsValueSet("live-restore") {
|
||||
daemon.configStore.LiveRestoreEnabled = conf.LiveRestoreEnabled
|
||||
newCfg.LiveRestoreEnabled = conf.LiveRestoreEnabled
|
||||
}
|
||||
|
||||
// prepare reload event attributes with updatable configurations
|
||||
attributes["live-restore"] = strconv.FormatBool(daemon.configStore.LiveRestoreEnabled)
|
||||
attributes["live-restore"] = strconv.FormatBool(newCfg.LiveRestoreEnabled)
|
||||
return nil
|
||||
}
|
||||
|
||||
// reloadNetworkDiagnosticPort updates the network controller starting the diagnostic if the config is valid
|
||||
func (daemon *Daemon) reloadNetworkDiagnosticPort(conf *config.Config, attributes map[string]string) {
|
||||
if conf == nil || daemon.netController == nil || !conf.IsValueSet("network-diagnostic-port") ||
|
||||
conf.NetworkDiagnosticPort < 1 || conf.NetworkDiagnosticPort > 65535 {
|
||||
// If there is no config make sure that the diagnostic is off
|
||||
if daemon.netController != nil {
|
||||
daemon.netController.StopDiagnostic()
|
||||
func (daemon *Daemon) reloadNetworkDiagnosticPort(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
|
||||
txn.OnCommit(func() error {
|
||||
if conf == nil || daemon.netController == nil || !conf.IsValueSet("network-diagnostic-port") ||
|
||||
conf.NetworkDiagnosticPort < 1 || conf.NetworkDiagnosticPort > 65535 {
|
||||
// If there is no config make sure that the diagnostic is off
|
||||
if daemon.netController != nil {
|
||||
daemon.netController.StopDiagnostic()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return
|
||||
}
|
||||
// Enable the network diagnostic if the flag is set with a valid port within the range
|
||||
logrus.WithFields(logrus.Fields{"port": conf.NetworkDiagnosticPort, "ip": "127.0.0.1"}).Warn("Starting network diagnostic server")
|
||||
daemon.netController.StartDiagnostic(conf.NetworkDiagnosticPort)
|
||||
// Enable the network diagnostic if the flag is set with a valid port within the range
|
||||
logrus.WithFields(logrus.Fields{"port": conf.NetworkDiagnosticPort, "ip": "127.0.0.1"}).Warn("Starting network diagnostic server")
|
||||
daemon.netController.StartDiagnostic(conf.NetworkDiagnosticPort)
|
||||
return nil
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// reloadFeatures updates configuration with enabled/disabled features
|
||||
func (daemon *Daemon) reloadFeatures(conf *config.Config, attributes map[string]string) {
|
||||
func (daemon *Daemon) reloadFeatures(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
|
||||
// update corresponding configuration
|
||||
// note that we allow features option to be entirely unset
|
||||
daemon.configStore.Features = conf.Features
|
||||
newCfg.Features = conf.Features
|
||||
|
||||
// prepare reload event attributes with updatable configurations
|
||||
attributes["features"] = fmt.Sprintf("%v", daemon.configStore.Features)
|
||||
attributes["features"] = fmt.Sprintf("%v", newCfg.Features)
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -22,12 +22,12 @@ func muteLogs() {
|
|||
func newDaemonForReloadT(t *testing.T, cfg *config.Config) *Daemon {
|
||||
t.Helper()
|
||||
daemon := &Daemon{
|
||||
configStore: cfg,
|
||||
imageService: images.NewImageService(images.ImageServiceConfig{}),
|
||||
}
|
||||
var err error
|
||||
daemon.registryService, err = registry.NewService(registry.ServiceOptions{})
|
||||
assert.Assert(t, err)
|
||||
daemon.configStore.Store(cfg)
|
||||
return daemon
|
||||
}
|
||||
|
||||
|
@ -52,7 +52,7 @@ func TestDaemonReloadLabels(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
label := daemon.configStore.Labels[0]
|
||||
label := daemon.config().Labels[0]
|
||||
if label != "foo:baz" {
|
||||
t.Fatalf("Expected daemon label `foo:baz`, got %s", label)
|
||||
}
|
||||
|
@ -131,8 +131,6 @@ func TestDaemonReloadMirrors(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
daemon.configStore = &config.Config{}
|
||||
|
||||
type pair struct {
|
||||
valid bool
|
||||
mirrors []string
|
||||
|
@ -234,8 +232,6 @@ func TestDaemonReloadInsecureRegistries(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
daemon.configStore = &config.Config{}
|
||||
|
||||
insecureRegistries := []string{
|
||||
"127.0.0.0/8", // this will be kept
|
||||
"10.10.1.11:5000", // this will be kept
|
||||
|
@ -335,11 +331,11 @@ func TestDaemonReloadNotAffectOthers(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
label := daemon.configStore.Labels[0]
|
||||
label := daemon.config().Labels[0]
|
||||
if label != "foo:baz" {
|
||||
t.Fatalf("Expected daemon label `foo:baz`, got %s", label)
|
||||
}
|
||||
debug := daemon.configStore.Debug
|
||||
debug := daemon.config().Debug
|
||||
if !debug {
|
||||
t.Fatal("Expected debug 'enabled', got 'disabled'")
|
||||
}
|
||||
|
@ -360,7 +356,7 @@ func TestDaemonReloadNetworkDiagnosticPort(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
netOptions, err := daemon.networkOptions(nil, nil)
|
||||
netOptions, err := daemon.networkOptions(&config.Config{}, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
|
@ -6,62 +6,46 @@ import (
|
|||
"bytes"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
)
|
||||
|
||||
// reloadPlatform updates configuration with platform specific options
|
||||
// and updates the passed attributes
|
||||
func (daemon *Daemon) reloadPlatform(conf *config.Config) (func(attributes map[string]string), error) {
|
||||
var txns []func()
|
||||
|
||||
if conf.IsValueSet("runtimes") {
|
||||
// Always set the default one
|
||||
conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: config.DefaultRuntimeBinary}
|
||||
if err := daemon.initRuntimes(conf.Runtimes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txns = append(txns, func() {
|
||||
daemon.configStore.Runtimes = conf.Runtimes
|
||||
})
|
||||
}
|
||||
|
||||
func (daemon *Daemon) reloadPlatform(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
|
||||
if conf.DefaultRuntime != "" {
|
||||
txns = append(txns, func() {
|
||||
daemon.configStore.DefaultRuntime = conf.DefaultRuntime
|
||||
})
|
||||
newCfg.DefaultRuntime = conf.DefaultRuntime
|
||||
}
|
||||
if conf.IsValueSet("runtimes") {
|
||||
newCfg.Runtimes = conf.Runtimes
|
||||
txn.OnCommit(func() error { return daemon.initRuntimes(newCfg) })
|
||||
}
|
||||
configureRuntimes(newCfg)
|
||||
|
||||
if conf.IsValueSet("default-shm-size") {
|
||||
newCfg.ShmSize = conf.ShmSize
|
||||
}
|
||||
|
||||
return func(attributes map[string]string) {
|
||||
for _, commit := range txns {
|
||||
commit()
|
||||
}
|
||||
if conf.CgroupNamespaceMode != "" {
|
||||
newCfg.CgroupNamespaceMode = conf.CgroupNamespaceMode
|
||||
}
|
||||
|
||||
if conf.IsValueSet("default-shm-size") {
|
||||
daemon.configStore.ShmSize = conf.ShmSize
|
||||
}
|
||||
if conf.IpcMode != "" {
|
||||
newCfg.IpcMode = conf.IpcMode
|
||||
}
|
||||
|
||||
if conf.CgroupNamespaceMode != "" {
|
||||
daemon.configStore.CgroupNamespaceMode = conf.CgroupNamespaceMode
|
||||
// Update attributes
|
||||
var runtimeList bytes.Buffer
|
||||
for name, rt := range newCfg.Runtimes {
|
||||
if runtimeList.Len() > 0 {
|
||||
runtimeList.WriteRune(' ')
|
||||
}
|
||||
runtimeList.WriteString(name + ":" + rt.Path)
|
||||
}
|
||||
|
||||
if conf.IpcMode != "" {
|
||||
daemon.configStore.IpcMode = conf.IpcMode
|
||||
}
|
||||
|
||||
// Update attributes
|
||||
var runtimeList bytes.Buffer
|
||||
for name, rt := range daemon.configStore.Runtimes {
|
||||
if runtimeList.Len() > 0 {
|
||||
runtimeList.WriteRune(' ')
|
||||
}
|
||||
runtimeList.WriteString(name + ":" + rt.Path)
|
||||
}
|
||||
|
||||
attributes["runtimes"] = runtimeList.String()
|
||||
attributes["default-runtime"] = daemon.configStore.DefaultRuntime
|
||||
attributes["default-shm-size"] = strconv.FormatInt(int64(daemon.configStore.ShmSize), 10)
|
||||
attributes["default-ipc-mode"] = daemon.configStore.IpcMode
|
||||
attributes["default-cgroupns-mode"] = daemon.configStore.CgroupNamespaceMode
|
||||
}, nil
|
||||
attributes["runtimes"] = runtimeList.String()
|
||||
attributes["default-runtime"] = newCfg.DefaultRuntime
|
||||
attributes["default-shm-size"] = strconv.FormatInt(int64(newCfg.ShmSize), 10)
|
||||
attributes["default-ipc-mode"] = newCfg.IpcMode
|
||||
attributes["default-cgroupns-mode"] = newCfg.CgroupNamespaceMode
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -4,6 +4,6 @@ import "github.com/docker/docker/daemon/config"
|
|||
|
||||
// reloadPlatform updates configuration with platform specific options
|
||||
// and updates the passed attributes
|
||||
func (daemon *Daemon) reloadPlatform(conf *config.Config) (func(attributes map[string]string), error) {
|
||||
return func(map[string]string) {}, nil
|
||||
func (daemon *Daemon) reloadPlatform(txn *reloadTxn, newCfg, conf *config.Config, attributes map[string]string) error {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
)
|
||||
|
||||
// ContainerRestart stops and starts a container. It attempts to
|
||||
|
@ -19,7 +20,7 @@ func (daemon *Daemon) ContainerRestart(ctx context.Context, name string, options
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = daemon.containerRestart(ctx, ctr, options)
|
||||
err = daemon.containerRestart(ctx, daemon.config(), ctr, options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Cannot restart container %s: %v", name, err)
|
||||
}
|
||||
|
@ -30,7 +31,7 @@ func (daemon *Daemon) ContainerRestart(ctx context.Context, name string, options
|
|||
// container. When stopping, wait for the given duration in seconds to
|
||||
// gracefully stop, before forcefully terminating the container. If
|
||||
// given a negative duration, wait forever for a graceful stop.
|
||||
func (daemon *Daemon) containerRestart(ctx context.Context, container *container.Container, options containertypes.StopOptions) error {
|
||||
func (daemon *Daemon) containerRestart(ctx context.Context, daemonCfg *config.Config, container *container.Container, options containertypes.StopOptions) error {
|
||||
// Determine isolation. If not specified in the hostconfig, use daemon default.
|
||||
actualIsolation := container.HostConfig.Isolation
|
||||
if containertypes.Isolation.IsDefault(actualIsolation) {
|
||||
|
@ -61,7 +62,7 @@ func (daemon *Daemon) containerRestart(ctx context.Context, container *container
|
|||
}
|
||||
}
|
||||
|
||||
if err := daemon.containerStart(ctx, container, "", "", true); err != nil {
|
||||
if err := daemon.containerStart(ctx, daemonCfg, container, "", "", true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -51,15 +51,15 @@ func defaultV2ShimConfig(conf *config.Config, runtimePath string) *types.ShimCon
|
|||
}
|
||||
|
||||
func (daemon *Daemon) loadRuntimes() error {
|
||||
return daemon.initRuntimes(daemon.configStore.Runtimes)
|
||||
return daemon.initRuntimes(daemon.config())
|
||||
}
|
||||
|
||||
func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error) {
|
||||
runtimeDir := filepath.Join(daemon.configStore.Root, "runtimes")
|
||||
func (daemon *Daemon) initRuntimes(cfg *config.Config) (err error) {
|
||||
runtimeDir := filepath.Join(cfg.Root, "runtimes")
|
||||
runtimeOldDir := runtimeDir + "-old"
|
||||
// Remove old temp directory if any
|
||||
os.RemoveAll(runtimeOldDir)
|
||||
tmpDir, err := os.MkdirTemp(daemon.configStore.Root, "gen-runtimes")
|
||||
tmpDir, err := os.MkdirTemp(cfg.Root, "gen-runtimes")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get temp dir to generate runtime scripts")
|
||||
}
|
||||
|
@ -91,8 +91,8 @@ func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error
|
|||
}
|
||||
}()
|
||||
|
||||
for name := range runtimes {
|
||||
rt := runtimes[name]
|
||||
for name := range cfg.Runtimes {
|
||||
rt := cfg.Runtimes[name]
|
||||
if rt.Path == "" && rt.Type == "" {
|
||||
return errors.Errorf("runtime %s: either a runtimeType or a path must be configured", name)
|
||||
}
|
||||
|
@ -111,7 +111,7 @@ func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error
|
|||
return err
|
||||
}
|
||||
}
|
||||
rt.ShimConfig = defaultV2ShimConfig(daemon.configStore, daemon.rewriteRuntimePath(name, rt.Path, rt.Args))
|
||||
rt.ShimConfig = defaultV2ShimConfig(cfg, daemon.rewriteRuntimePath(cfg, name, rt.Path, rt.Args))
|
||||
var featuresStderr bytes.Buffer
|
||||
featuresCmd := exec.Command(rt.Path, append(rt.Args, "features")...)
|
||||
featuresCmd.Stderr = &featuresStderr
|
||||
|
@ -139,7 +139,7 @@ func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error
|
|||
}
|
||||
}
|
||||
}
|
||||
runtimes[name] = rt
|
||||
cfg.Runtimes[name] = rt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -147,16 +147,16 @@ func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error
|
|||
// rewriteRuntimePath is used for runtimes which have custom arguments supplied.
|
||||
// This is needed because the containerd API only calls the OCI runtime binary, there is no options for extra arguments.
|
||||
// To support this case, the daemon wraps the specified runtime in a script that passes through those arguments.
|
||||
func (daemon *Daemon) rewriteRuntimePath(name, p string, args []string) string {
|
||||
func (daemon *Daemon) rewriteRuntimePath(cfg *config.Config, name, p string, args []string) string {
|
||||
if len(args) == 0 {
|
||||
return p
|
||||
}
|
||||
|
||||
return filepath.Join(daemon.configStore.Root, "runtimes", name)
|
||||
return filepath.Join(cfg.Root, "runtimes", name)
|
||||
}
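The comment above explains why runtimes with extra arguments are wrapped: containerd can only exec a bare binary, so the daemon generates a wrapper under `<root>/runtimes/<name>`. A rough sketch of what generating such a wrapper could look like; only the path layout comes from this diff, while the script body and permissions are assumptions for illustration.

```go
// Illustrative sketch of wrapping a runtime that needs extra arguments in a
// small shell script so containerd can invoke it as a single binary.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func writeRuntimeWrapper(root, name, path string, args []string) (string, error) {
	// Script body is an assumption: pass the configured arguments through,
	// then whatever containerd appends ("$@").
	script := fmt.Sprintf("#!/bin/sh\nexec %s", path)
	for _, a := range args {
		script += " " + a
	}
	script += ` "$@"` + "\n"

	wrapper := filepath.Join(root, "runtimes", name)
	if err := os.MkdirAll(filepath.Dir(wrapper), 0o700); err != nil {
		return "", err
	}
	// 0o700: the wrapper is only ever executed by the daemon itself.
	if err := os.WriteFile(wrapper, []byte(script), 0o700); err != nil {
		return "", err
	}
	return wrapper, nil
}

func main() {
	p, err := writeRuntimeWrapper(os.TempDir(), "myruntime", "/usr/bin/runc", []string{"--debug"})
	if err != nil {
		panic(err)
	}
	fmt.Println("wrapper at", p)
}
```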
|
||||
|
||||
func (daemon *Daemon) getRuntime(name string) (shim string, opts interface{}, err error) {
|
||||
rt := daemon.configStore.GetRuntime(name)
|
||||
func (daemon *Daemon) getRuntime(cfg *config.Config, name string) (shim string, opts interface{}, err error) {
|
||||
rt := cfg.GetRuntime(name)
|
||||
if rt == nil {
|
||||
if !config.IsPermissibleC8dRuntimeName(name) {
|
||||
return "", nil, errdefs.InvalidParameter(errors.Errorf("unknown or invalid runtime name: %s", name))
|
||||
|
|
|
@ -86,11 +86,13 @@ func TestInitRuntimes_InvalidConfigs(t *testing.T) {
|
|||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg, err := config.New()
|
||||
assert.NilError(t, err)
|
||||
d := &Daemon{configStore: cfg}
|
||||
d.configStore.Root = t.TempDir()
|
||||
assert.Assert(t, os.Mkdir(filepath.Join(d.configStore.Root, "runtimes"), 0700))
|
||||
cfg.Root = t.TempDir()
|
||||
cfg.Runtimes["myruntime"] = tt.runtime
|
||||
d := &Daemon{}
|
||||
d.configStore.Store(cfg)
|
||||
assert.Assert(t, os.Mkdir(filepath.Join(d.config().Root, "runtimes"), 0700))
|
||||
|
||||
err = d.initRuntimes(map[string]types.Runtime{"myruntime": tt.runtime})
|
||||
err = d.initRuntimes(d.config())
|
||||
assert.Check(t, is.ErrorContains(err, tt.expectErr))
|
||||
})
|
||||
}
|
||||
|
@ -124,20 +126,22 @@ func TestGetRuntime(t *testing.T) {
|
|||
cfg, err := config.New()
|
||||
assert.NilError(t, err)
|
||||
|
||||
d := &Daemon{configStore: cfg}
|
||||
d.configStore.Root = t.TempDir()
|
||||
assert.Assert(t, os.Mkdir(filepath.Join(d.configStore.Root, "runtimes"), 0700))
|
||||
d.configStore.Runtimes = map[string]types.Runtime{
|
||||
cfg.Root = t.TempDir()
|
||||
assert.Assert(t, os.Mkdir(filepath.Join(cfg.Root, "runtimes"), 0700))
|
||||
cfg.Runtimes = map[string]types.Runtime{
|
||||
configuredRtName: configuredRuntime,
|
||||
rtWithArgsName: rtWithArgs,
|
||||
shimWithOptsName: shimWithOpts,
|
||||
shimAliasName: shimAlias,
|
||||
configuredShimByPathName: configuredShimByPath,
|
||||
}
|
||||
configureRuntimes(d.configStore)
|
||||
configureRuntimes(cfg)
|
||||
|
||||
d := &Daemon{}
|
||||
d.configStore.Store(cfg)
|
||||
assert.Assert(t, d.loadRuntimes())
|
||||
|
||||
stockRuntime, ok := d.configStore.Runtimes[config.StockRuntimeName]
|
||||
stockRuntime, ok := cfg.Runtimes[config.StockRuntimeName]
|
||||
assert.Assert(t, ok, "stock runtime could not be found (test needs to be updated)")
|
||||
|
||||
configdOpts := *stockRuntime.ShimConfig.Opts.(*v2runcoptions.Options)
|
||||
|
@ -199,8 +203,9 @@ func TestGetRuntime(t *testing.T) {
|
|||
runtime: rtWithArgsName,
|
||||
wantShim: stockRuntime.ShimConfig.Binary,
|
||||
wantOpts: defaultV2ShimConfig(
|
||||
d.configStore,
|
||||
d.config(),
|
||||
d.rewriteRuntimePath(
|
||||
d.config(),
|
||||
rtWithArgsName,
|
||||
rtWithArgs.Path,
|
||||
rtWithArgs.Args)).Opts,
|
||||
|
@ -224,7 +229,7 @@ func TestGetRuntime(t *testing.T) {
|
|||
} {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotShim, gotOpts, err := d.getRuntime(tt.runtime)
|
||||
gotShim, gotOpts, err := d.getRuntime(cfg, tt.runtime)
|
||||
assert.Check(t, is.Equal(gotShim, tt.wantShim))
|
||||
assert.Check(t, is.DeepEqual(gotOpts, tt.wantOpts))
|
||||
if tt.wantShim != "" {
|
||||
|
|
|
@ -2,8 +2,10 @@ package daemon
|
|||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/docker/docker/daemon/config"
|
||||
)
|
||||
|
||||
func (daemon *Daemon) getRuntime(name string) (shim string, opts interface{}, err error) {
|
||||
func (daemon *Daemon) getRuntime(cfg *config.Config, name string) (shim string, opts interface{}, err error) {
|
||||
return "", nil, errors.New("not implemented")
|
||||
}
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
"github.com/docker/docker/api/types"
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/libcontainerd"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -17,7 +18,8 @@ import (
|
|||
|
||||
// ContainerStart starts a container.
|
||||
func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error {
|
||||
if checkpoint != "" && !daemon.HasExperimental() {
|
||||
daemonCfg := daemon.config()
|
||||
if checkpoint != "" && !daemonCfg.Experimental {
|
||||
return errdefs.InvalidParameter(errors.New("checkpoint is only supported in experimental mode"))
|
||||
}
|
||||
|
||||
|
@ -55,7 +57,7 @@ func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfi
|
|||
if hostConfig != nil {
|
||||
logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12")
|
||||
oldNetworkMode := ctr.HostConfig.NetworkMode
|
||||
if err := daemon.setSecurityOptions(ctr, hostConfig); err != nil {
|
||||
if err := daemon.setSecurityOptions(daemonCfg, ctr, hostConfig); err != nil {
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil {
|
||||
|
@ -83,24 +85,24 @@ func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfi
|
|||
|
||||
// check if hostConfig is in line with the current system settings.
|
||||
// It may happen cgroups are umounted or the like.
|
||||
if _, err = daemon.verifyContainerSettings(ctr.HostConfig, nil, false); err != nil {
|
||||
if _, err = daemon.verifyContainerSettings(daemonCfg, ctr.HostConfig, nil, false); err != nil {
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
// Adapt for old containers in case we have updates in this function and
|
||||
// old containers never have chance to call the new function in create stage.
|
||||
if hostConfig != nil {
|
||||
if err := daemon.adaptContainerSettings(ctr.HostConfig, false); err != nil {
|
||||
if err := daemon.adaptContainerSettings(daemonCfg, ctr.HostConfig, false); err != nil {
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
}
|
||||
return daemon.containerStart(ctx, ctr, checkpoint, checkpointDir, true)
|
||||
return daemon.containerStart(ctx, daemonCfg, ctr, checkpoint, checkpointDir, true)
|
||||
}
|
||||
|
||||
// containerStart prepares the container to run by setting up everything the
|
||||
// container needs, such as storage and networking, as well as links
|
||||
// between containers. The container is left waiting for a signal to
|
||||
// begin running.
|
||||
func (daemon *Daemon) containerStart(ctx context.Context, container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (retErr error) {
|
||||
func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *config.Config, container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (retErr error) {
|
||||
start := time.Now()
|
||||
container.Lock()
|
||||
defer container.Unlock()
|
||||
|
@ -136,7 +138,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, container *container.C
|
|||
// if containers AutoRemove flag is set, remove it after clean up
|
||||
if container.HostConfig.AutoRemove {
|
||||
container.Unlock()
|
||||
if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
|
||||
if err := daemon.containerRm(daemonCfg, container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
|
||||
logrus.Errorf("can't remove container %s: %v", container.ID, err)
|
||||
}
|
||||
container.Lock()
|
||||
|
@ -148,11 +150,11 @@ func (daemon *Daemon) containerStart(ctx context.Context, container *container.C
|
|||
return err
|
||||
}
|
||||
|
||||
if err := daemon.initializeNetworking(container); err != nil {
|
||||
if err := daemon.initializeNetworking(daemonCfg, container); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
spec, err := daemon.createSpec(ctx, container)
|
||||
spec, err := daemon.createSpec(ctx, daemonCfg, container)
|
||||
if err != nil {
|
||||
return errdefs.System(err)
|
||||
}
|
||||
|
@ -173,7 +175,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, container *container.C
|
|||
}
|
||||
}
|
||||
|
||||
shim, createOptions, err := daemon.getLibcontainerdCreateOptions(container)
|
||||
shim, createOptions, err := daemon.getLibcontainerdCreateOptions(daemonCfg, container)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -4,17 +4,18 @@ package daemon // import "github.com/docker/docker/daemon"
|
|||
|
||||
import (
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
)
|
||||
|
||||
// getLibcontainerdCreateOptions callers must hold a lock on the container
|
||||
func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (string, interface{}, error) {
|
||||
func (daemon *Daemon) getLibcontainerdCreateOptions(daemonCfg *config.Config, container *container.Container) (string, interface{}, error) {
|
||||
// Ensure a runtime has been assigned to this container
|
||||
if container.HostConfig.Runtime == "" {
|
||||
container.HostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
|
||||
container.HostConfig.Runtime = daemonCfg.DefaultRuntime
|
||||
container.CheckpointTo(daemon.containersReplica)
|
||||
}
|
||||
|
||||
binary, opts, err := daemon.getRuntime(container.HostConfig.Runtime)
|
||||
binary, opts, err := daemon.getRuntime(daemonCfg, container.HostConfig.Runtime)
|
||||
if err != nil {
|
||||
return "", nil, setExitCodeFromError(container.SetExitCode, err)
|
||||
}
|
||||
|
|
|
@ -3,10 +3,11 @@ package daemon // import "github.com/docker/docker/daemon"
|
|||
import (
|
||||
"github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
func (daemon *Daemon) getLibcontainerdCreateOptions(_ *container.Container) (string, interface{}, error) {
|
||||
func (daemon *Daemon) getLibcontainerdCreateOptions(*config.Config, *container.Container) (string, interface{}, error) {
|
||||
if system.ContainerdRuntimeSupported() {
|
||||
opts := &options.Options{}
|
||||
return "io.containerd.runhcs.v1", opts, nil
|
||||
|
|
|
@ -13,7 +13,8 @@ import (
|
|||
func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) {
|
||||
var warnings []string
|
||||
|
||||
warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true)
|
||||
daemonCfg := daemon.config()
|
||||
warnings, err := daemon.verifyContainerSettings(daemonCfg, hostConfig, nil, true)
|
||||
if err != nil {
|
||||
return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
|
|
@ -58,6 +58,7 @@ require (
|
|||
github.com/klauspost/compress v1.16.3
|
||||
github.com/miekg/dns v1.1.43
|
||||
github.com/mistifyio/go-zfs/v3 v3.0.1
|
||||
github.com/mitchellh/copystructure v1.2.0
|
||||
github.com/moby/buildkit v0.11.7-0.20230525183624-798ad6b0ce9f
|
||||
github.com/moby/ipvs v1.1.0
|
||||
github.com/moby/locker v1.0.1
|
||||
|
@ -154,6 +155,9 @@ require (
|
|||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/onsi/ginkgo/v2 v2.1.4 // indirect
|
||||
github.com/onsi/gomega v1.20.1 // indirect
|
||||
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 // indirect
|
||||
github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 // indirect
|
||||
github.com/philhofer/fwd v1.1.2 // indirect
|
||||
|
|
10  vendor.sum
|
@ -1034,6 +1034,8 @@ github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go
|
|||
github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
|
||||
github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
|
||||
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
|
||||
|
@ -1048,6 +1050,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
|
|||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
|
||||
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
|
||||
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
|
||||
github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ=
|
||||
github.com/moby/buildkit v0.11.7-0.20230525183624-798ad6b0ce9f h1:9wobL03Y6U8azuDLUqYblbUdVU9jpjqecDdW7w4wZtI=
|
||||
|
@ -1121,8 +1125,9 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0
|
|||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
|
||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||
github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
|
||||
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
|
||||
github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY=
|
||||
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
|
||||
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
|
@ -1133,8 +1138,9 @@ github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
|
|||
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
|
||||
github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
|
||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
|
||||
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
|
|
21  vendor/github.com/mitchellh/copystructure/LICENSE  generated vendored  Normal file
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Mitchell Hashimoto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
21  vendor/github.com/mitchellh/copystructure/README.md  generated vendored  Normal file
|
@ -0,0 +1,21 @@
|
|||
# copystructure
|
||||
|
||||
copystructure is a Go library for deep copying values in Go.
|
||||
|
||||
This allows you to copy Go values that may contain reference values
|
||||
such as maps, slices, or pointers, and copy their data as well instead
|
||||
of just their references.
|
||||
|
||||
## Installation
|
||||
|
||||
Standard `go get`:
|
||||
|
||||
```
|
||||
$ go get github.com/mitchellh/copystructure
|
||||
```
|
||||
|
||||
## Usage & Example
|
||||
|
||||
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure).
|
||||
|
||||
The `Copy` function has examples associated with it there.
|
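Since the README defers to the Godoc for examples, here is a minimal deep-copy example in the spirit of how this commit uses the library on the daemon config (Copy, then type-assert the result); the `Settings` type is a stand-in, not anything from the daemon.

```go
// Minimal copystructure usage: deep-copy a value, then mutate the copy.
package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

type Settings struct {
	Labels []string
	Limits map[string]int
}

func main() {
	orig := &Settings{
		Labels: []string{"env=prod"},
		Limits: map[string]int{"downloads": 3},
	}

	copied, err := copystructure.Copy(orig)
	if err != nil {
		panic(err)
	}
	dup := copied.(*Settings) // Copy preserves the concrete type

	dup.Labels[0] = "env=test" // mutating the copy leaves the original intact
	fmt.Println(orig.Labels[0], dup.Labels[0])
}
```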
15  vendor/github.com/mitchellh/copystructure/copier_time.go  generated vendored  Normal file
|
@ -0,0 +1,15 @@
|
|||
package copystructure
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Copiers[reflect.TypeOf(time.Time{})] = timeCopier
|
||||
}
|
||||
|
||||
func timeCopier(v interface{}) (interface{}, error) {
|
||||
// Just... copy it.
|
||||
return v.(time.Time), nil
|
||||
}
|
631  vendor/github.com/mitchellh/copystructure/copystructure.go  generated vendored  Normal file
|
@ -0,0 +1,631 @@
|
|||
package copystructure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/mitchellh/reflectwalk"
|
||||
)
|
||||
|
||||
const tagKey = "copy"
|
||||
|
||||
// Copy returns a deep copy of v.
|
||||
//
|
||||
// Copy is unable to copy unexported fields in a struct (lowercase field names).
|
||||
// Unexported fields can't be reflected by the Go runtime and therefore
|
||||
// copystructure can't perform any data copies.
|
||||
//
|
||||
// For structs, copy behavior can be controlled with struct tags. For example:
|
||||
//
|
||||
// struct {
|
||||
// Name string
|
||||
// Data *bytes.Buffer `copy:"shallow"`
|
||||
// }
|
||||
//
|
||||
// The available tag values are:
|
||||
//
|
||||
// * "ignore" - The field will be ignored, effectively resulting in it being
|
||||
// assigned the zero value in the copy.
|
||||
//
|
||||
// * "shallow" - The field will be be shallow copied. This means that references
|
||||
// values such as pointers, maps, slices, etc. will be directly assigned
|
||||
// versus deep copied.
|
||||
//
|
||||
func Copy(v interface{}) (interface{}, error) {
|
||||
return Config{}.Copy(v)
|
||||
}
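A short illustration of the struct tags documented above; `Record` is a made-up type, and the assertions follow the documented semantics ("shallow" shares the reference, "ignore" leaves the zero value).

```go
// Example of copystructure's "copy" struct tags.
package main

import (
	"bytes"
	"fmt"

	"github.com/mitchellh/copystructure"
)

type Record struct {
	Name    string
	Cache   *bytes.Buffer `copy:"shallow"`
	Scratch []byte        `copy:"ignore"`
}

func main() {
	orig := &Record{Name: "a", Cache: bytes.NewBufferString("x"), Scratch: []byte{1}}

	copied, err := copystructure.Copy(orig)
	if err != nil {
		panic(err)
	}
	dup := copied.(*Record)

	fmt.Println(dup.Cache == orig.Cache) // true: shallow copy shares the buffer
	fmt.Println(dup.Scratch == nil)      // true: ignored field stays at its zero value
}
```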
|
||||
|
||||
// CopierFunc is a function that knows how to deep copy a specific type.
|
||||
// Register these globally with the Copiers variable.
|
||||
type CopierFunc func(interface{}) (interface{}, error)
|
||||
|
||||
// Copiers is a map of types that behave specially when they are copied.
|
||||
// If a type is found in this map while deep copying, this function
|
||||
// will be called to copy it instead of attempting to copy all fields.
|
||||
//
|
||||
// The key should be the type, obtained using: reflect.TypeOf(value with type).
|
||||
//
|
||||
// It is unsafe to write to this map after Copies have started. If you
|
||||
// are writing to this map while also copying, wrap all modifications to
|
||||
// this map as well as to Copy in a mutex.
|
||||
var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
|
||||
|
||||
// ShallowCopiers is a map of pointer types that behave specially
|
||||
// when they are copied. If a type is found in this map while deep
|
||||
// copying, the pointer value will be shallow copied and not walked
|
||||
// into.
|
||||
//
|
||||
// The key should be the type, obtained using: reflect.TypeOf(value
|
||||
// with type).
|
||||
//
|
||||
// It is unsafe to write to this map after Copies have started. If you
|
||||
// are writing to this map while also copying, wrap all modifications to
|
||||
// this map as well as to Copy in a mutex.
|
||||
var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{})
|
||||
|
||||
// Must is a helper that wraps a call to a function returning
|
||||
// (interface{}, error) and panics if the error is non-nil. It is intended
|
||||
// for use in variable initializations and should only be used when a copy
|
||||
// error should be a crashing case.
|
||||
func Must(v interface{}, err error) interface{} {
|
||||
if err != nil {
|
||||
panic("copy error: " + err.Error())
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")
|
||||
|
||||
type Config struct {
|
||||
// Lock any types that are a sync.Locker and are not a mutex while copying.
|
||||
// If there is an RLocker method, use that to get the sync.Locker.
|
||||
Lock bool
|
||||
|
||||
// Copiers is a map of types associated with a CopierFunc. Use the global
|
||||
// Copiers map if this is nil.
|
||||
Copiers map[reflect.Type]CopierFunc
|
||||
|
||||
// ShallowCopiers is a map of pointer types that when they are
|
||||
// shallow copied no matter where they are encountered. Use the
|
||||
// global ShallowCopiers if this is nil.
|
||||
ShallowCopiers map[reflect.Type]struct{}
|
||||
}
|
||||
|
||||
func (c Config) Copy(v interface{}) (interface{}, error) {
|
||||
if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
|
||||
return nil, errPointerRequired
|
||||
}
|
||||
|
||||
w := new(walker)
|
||||
if c.Lock {
|
||||
w.useLocks = true
|
||||
}
|
||||
|
||||
if c.Copiers == nil {
|
||||
c.Copiers = Copiers
|
||||
}
|
||||
w.copiers = c.Copiers
|
||||
|
||||
if c.ShallowCopiers == nil {
|
||||
c.ShallowCopiers = ShallowCopiers
|
||||
}
|
||||
w.shallowCopiers = c.ShallowCopiers
|
||||
|
||||
err := reflectwalk.Walk(v, w)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the result. If the result is nil, then we want to turn it
|
||||
// into a typed nil if we can.
|
||||
result := w.Result
|
||||
if result == nil {
|
||||
val := reflect.ValueOf(v)
|
||||
result = reflect.Indirect(reflect.New(val.Type())).Interface()
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Return the key used to index interfaces types we've seen. Store the number
|
||||
// of pointers in the upper 32bits, and the depth in the lower 32bits. This is
|
||||
// easy to calculate, easy to match a key with our current depth, and we don't
|
||||
// need to deal with initializing and cleaning up nested maps or slices.
|
||||
func ifaceKey(pointers, depth int) uint64 {
|
||||
return uint64(pointers)<<32 | uint64(depth)
|
||||
}
|
||||
|
||||
type walker struct {
|
||||
Result interface{}
|
||||
|
||||
copiers map[reflect.Type]CopierFunc
|
||||
shallowCopiers map[reflect.Type]struct{}
|
||||
depth int
|
||||
ignoreDepth int
|
||||
vals []reflect.Value
|
||||
cs []reflect.Value
|
||||
|
||||
// This stores the number of pointers we've walked over, indexed by depth.
|
||||
ps []int
|
||||
|
||||
// If an interface is indirected by a pointer, we need to know the type of
|
||||
// interface to create when creating the new value. Store the interface
|
||||
// types here, indexed by both the walk depth and the number of pointers
|
||||
// already seen at that depth. Use ifaceKey to calculate the proper uint64
|
||||
// value.
|
||||
ifaceTypes map[uint64]reflect.Type
|
||||
|
||||
// any locks we've taken, indexed by depth
|
||||
locks []sync.Locker
|
||||
// take locks while walking the structure
|
||||
useLocks bool
|
||||
}
|
||||
|
||||
func (w *walker) Enter(l reflectwalk.Location) error {
|
||||
w.depth++
|
||||
|
||||
// ensure we have enough elements to index via w.depth
|
||||
for w.depth >= len(w.locks) {
|
||||
w.locks = append(w.locks, nil)
|
||||
}
|
||||
|
||||
for len(w.ps) < w.depth+1 {
|
||||
w.ps = append(w.ps, 0)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *walker) Exit(l reflectwalk.Location) error {
|
||||
locker := w.locks[w.depth]
|
||||
w.locks[w.depth] = nil
|
||||
if locker != nil {
|
||||
defer locker.Unlock()
|
||||
}
|
||||
|
||||
// clear out pointers and interfaces as we exit the stack
|
||||
w.ps[w.depth] = 0
|
||||
|
||||
for k := range w.ifaceTypes {
|
||||
mask := uint64(^uint32(0))
|
||||
if k&mask == uint64(w.depth) {
|
||||
delete(w.ifaceTypes, k)
|
||||
}
|
||||
}
|
||||
|
||||
w.depth--
|
||||
if w.ignoreDepth > w.depth {
|
||||
w.ignoreDepth = 0
|
||||
}
|
||||
|
||||
if w.ignoring() {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch l {
|
||||
case reflectwalk.Array:
|
||||
fallthrough
|
||||
case reflectwalk.Map:
|
||||
fallthrough
|
||||
case reflectwalk.Slice:
|
||||
w.replacePointerMaybe()
|
||||
|
||||
// Pop map off our container
|
||||
w.cs = w.cs[:len(w.cs)-1]
|
||||
case reflectwalk.MapValue:
|
||||
// Pop off the key and value
|
||||
mv := w.valPop()
|
||||
mk := w.valPop()
|
||||
m := w.cs[len(w.cs)-1]
|
||||
|
||||
// If mv is the zero value, SetMapIndex deletes the key from the map,
|
||||
// or in this case never adds it. We need to create a properly typed
|
||||
// zero value so that this key can be set.
|
||||
if !mv.IsValid() {
|
||||
mv = reflect.Zero(m.Elem().Type().Elem())
|
||||
}
|
||||
m.Elem().SetMapIndex(mk, mv)
|
||||
case reflectwalk.ArrayElem:
|
||||
// Pop off the value and the index and set it on the array
|
||||
v := w.valPop()
|
||||
i := w.valPop().Interface().(int)
|
||||
if v.IsValid() {
|
||||
a := w.cs[len(w.cs)-1]
|
||||
ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call
|
||||
if ae.CanSet() {
|
||||
ae.Set(v)
|
||||
}
|
||||
}
|
||||
case reflectwalk.SliceElem:
|
||||
// Pop off the value and the index and set it on the slice
|
||||
v := w.valPop()
|
||||
i := w.valPop().Interface().(int)
|
||||
if v.IsValid() {
|
||||
s := w.cs[len(w.cs)-1]
|
||||
se := s.Elem().Index(i)
|
||||
if se.CanSet() {
|
||||
se.Set(v)
|
||||
}
|
||||
}
|
||||
case reflectwalk.Struct:
|
||||
w.replacePointerMaybe()
|
||||
|
||||
// Remove the struct from the container stack
|
||||
w.cs = w.cs[:len(w.cs)-1]
|
||||
case reflectwalk.StructField:
|
||||
// Pop off the value and the field
|
||||
v := w.valPop()
|
||||
f := w.valPop().Interface().(reflect.StructField)
|
||||
if v.IsValid() {
|
||||
s := w.cs[len(w.cs)-1]
|
||||
sf := reflect.Indirect(s).FieldByName(f.Name)
|
||||
|
||||
if sf.CanSet() {
|
||||
sf.Set(v)
|
||||
}
|
||||
}
|
||||
case reflectwalk.WalkLoc:
|
||||
// Clear out the slices for GC
|
||||
w.cs = nil
|
||||
w.vals = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *walker) Map(m reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(m)

	// Create the map. If the map itself is nil, then just make a nil map
	var newMap reflect.Value
	if m.IsNil() {
		newMap = reflect.New(m.Type())
	} else {
		newMap = wrapPtr(reflect.MakeMap(m.Type()))
	}

	w.cs = append(w.cs, newMap)
	w.valPush(newMap)
	return nil
}

func (w *walker) MapElem(m, k, v reflect.Value) error {
	return nil
}

func (w *walker) PointerEnter(v bool) error {
	if v {
		w.ps[w.depth]++
	}
	return nil
}

func (w *walker) PointerExit(v bool) error {
	if v {
		w.ps[w.depth]--
	}
	return nil
}

func (w *walker) Pointer(v reflect.Value) error {
	if _, ok := w.shallowCopiers[v.Type()]; ok {
		// Shallow copy this value. Use the same logic as primitive, then
		// return skip.
		if err := w.Primitive(v); err != nil {
			return err
		}

		return reflectwalk.SkipEntry
	}

	return nil
}

func (w *walker) Interface(v reflect.Value) error {
	if !v.IsValid() {
		return nil
	}
	if w.ifaceTypes == nil {
		w.ifaceTypes = make(map[uint64]reflect.Type)
	}

	w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type()
	return nil
}

func (w *walker) Primitive(v reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(v)

	// IsValid verifies the v is non-zero and CanInterface verifies
	// that we're allowed to read this value (unexported fields).
	var newV reflect.Value
	if v.IsValid() && v.CanInterface() {
		newV = reflect.New(v.Type())
		newV.Elem().Set(v)
	}

	w.valPush(newV)
	w.replacePointerMaybe()
	return nil
}

func (w *walker) Slice(s reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(s)

	var newS reflect.Value
	if s.IsNil() {
		newS = reflect.New(s.Type())
	} else {
		newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap()))
	}

	w.cs = append(w.cs, newS)
	w.valPush(newS)
	return nil
}

func (w *walker) SliceElem(i int, elem reflect.Value) error {
	if w.ignoring() {
		return nil
	}

	// We don't write the slice here because elem might still be
	// arbitrarily complex. Just record the index and continue on.
	w.valPush(reflect.ValueOf(i))

	return nil
}

func (w *walker) Array(a reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(a)

	newA := reflect.New(a.Type())

	w.cs = append(w.cs, newA)
	w.valPush(newA)
	return nil
}

func (w *walker) ArrayElem(i int, elem reflect.Value) error {
	if w.ignoring() {
		return nil
	}

	// We don't write the array here because elem might still be
	// arbitrarily complex. Just record the index and continue on.
	w.valPush(reflect.ValueOf(i))

	return nil
}

func (w *walker) Struct(s reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(s)

	var v reflect.Value
	if c, ok := w.copiers[s.Type()]; ok {
		// We have a Copier for this struct, so we use that copier to
		// get the copy, and we ignore anything deeper than this.
		w.ignoreDepth = w.depth

		dup, err := c(s.Interface())
		if err != nil {
			return err
		}

		// We need to put a pointer to the value on the value stack,
		// so allocate a new pointer and set it.
		v = reflect.New(s.Type())
		reflect.Indirect(v).Set(reflect.ValueOf(dup))
	} else {
		// No copier, we copy ourselves and allow reflectwalk to guide
		// us deeper into the structure for copying.
		v = reflect.New(s.Type())
	}

	// Push the value onto the value stack for setting the struct field,
	// and add the struct itself to the containers stack in case we walk
	// deeper so that its own fields can be modified.
	w.valPush(v)
	w.cs = append(w.cs, v)

	return nil
}

func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
	if w.ignoring() {
		return nil
	}

	// If PkgPath is non-empty, this is a private (unexported) field.
	// We do not set this unexported since the Go runtime doesn't allow us.
	if f.PkgPath != "" {
		return reflectwalk.SkipEntry
	}

	switch f.Tag.Get(tagKey) {
	case "shallow":
		// If we're shallow copying then assign the value directly to the
		// struct and skip the entry.
		if v.IsValid() {
			s := w.cs[len(w.cs)-1]
			sf := reflect.Indirect(s).FieldByName(f.Name)
			if sf.CanSet() {
				sf.Set(v)
			}
		}

		return reflectwalk.SkipEntry

	case "ignore":
		// Do nothing
		return reflectwalk.SkipEntry
	}

	// Push the field onto the stack, we'll handle it when we exit
	// the struct field in Exit...
	w.valPush(reflect.ValueOf(f))

	return nil
}

// ignore causes the walker to ignore any more values until we exit this one
func (w *walker) ignore() {
	w.ignoreDepth = w.depth
}

func (w *walker) ignoring() bool {
	return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
}

func (w *walker) pointerPeek() bool {
	return w.ps[w.depth] > 0
}

func (w *walker) valPop() reflect.Value {
	result := w.vals[len(w.vals)-1]
	w.vals = w.vals[:len(w.vals)-1]

	// If we're out of values, that means we popped everything off. In
	// this case, we reset the result so the next pushed value becomes
	// the result.
	if len(w.vals) == 0 {
		w.Result = nil
	}

	return result
}

func (w *walker) valPush(v reflect.Value) {
	w.vals = append(w.vals, v)

	// If we haven't set the result yet, then this is the result since
	// it is the first (outermost) value we're seeing.
	if w.Result == nil && v.IsValid() {
		w.Result = v.Interface()
	}
}

func (w *walker) replacePointerMaybe() {
	// Determine the last pointer value. If it is NOT a pointer, then
	// we need to push that onto the stack.
	if !w.pointerPeek() {
		w.valPush(reflect.Indirect(w.valPop()))
		return
	}

	v := w.valPop()

	// If the expected type is a pointer to an interface of any depth,
	// such as *interface{}, **interface{}, etc., then we need to convert
	// the value "v" from *CONCRETE to *interface{} so types match for
	// Set.
	//
	// Example if v is type *Foo where Foo is a struct, v would become
	// *interface{} instead. This only happens if we have an interface expectation
	// at this depth.
	//
	// For more info, see GH-16
	if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
		y := reflect.New(iType)           // Create *interface{}
		y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
		v = y                             // v is now typed *interface{} (where *v = Foo)
	}

	for i := 1; i < w.ps[w.depth]; i++ {
		if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
			iface := reflect.New(iType).Elem()
			iface.Set(v)
			v = iface
		}

		p := reflect.New(v.Type())
		p.Elem().Set(v)
		v = p
	}

	w.valPush(v)
}

// if this value is a Locker, lock it and add it to the locks slice
func (w *walker) lock(v reflect.Value) {
	if !w.useLocks {
		return
	}

	if !v.IsValid() || !v.CanInterface() {
		return
	}

	type rlocker interface {
		RLocker() sync.Locker
	}

	var locker sync.Locker

	// We can't call Interface() on a value directly, since that requires
	// a copy. This is OK, since the pointer to a value which is a sync.Locker
	// is also a sync.Locker.
	if v.Kind() == reflect.Ptr {
		switch l := v.Interface().(type) {
		case rlocker:
			// don't lock a mutex directly
			if _, ok := l.(*sync.RWMutex); !ok {
				locker = l.RLocker()
			}
		case sync.Locker:
			locker = l
		}
	} else if v.CanAddr() {
		switch l := v.Addr().Interface().(type) {
		case rlocker:
			// don't lock a mutex directly
			if _, ok := l.(*sync.RWMutex); !ok {
				locker = l.RLocker()
			}
		case sync.Locker:
			locker = l
		}
	}

	// still no callable locker
	if locker == nil {
		return
	}

	// don't lock a mutex directly
	switch locker.(type) {
	case *sync.Mutex, *sync.RWMutex:
		return
	}

	locker.Lock()
	w.locks[w.depth] = locker
}

// wrapPtr is a helper that takes v and always makes it *v. copystructure
// stores things internally as pointers until the last moment before unwrapping.
func wrapPtr(v reflect.Value) reflect.Value {
	if !v.IsValid() {
		return v
	}
	vPtr := reflect.New(v.Type())
	vPtr.Elem().Set(v)
	return vPtr
}

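For orientation, the walker above is normally driven through copystructure's exported API rather than used directly. A minimal sketch, assuming the library's Config.Copy entry point with Lock enabled (the conf type and its fields are hypothetical and not part of this commit):

package main

import (
	"fmt"
	"sync"

	"github.com/mitchellh/copystructure"
)

// conf is a hypothetical config struct. The unexported mutex is skipped by
// the walker's StructField handling; exported fields are deep-copied.
type conf struct {
	mu     sync.Mutex
	Labels map[string]string
}

func main() {
	orig := &conf{Labels: map[string]string{"env": "prod"}}

	// Config{Lock: true} corresponds to the walker's useLocks/lock() path,
	// taking any sync.Locker it encounters while copying.
	dup, err := copystructure.Config{Lock: true}.Copy(orig)
	if err != nil {
		panic(err)
	}

	cp := dup.(*conf)
	cp.Labels["env"] = "dev"
	fmt.Println(orig.Labels["env"], cp.Labels["env"]) // prod dev
}

The copy and the original share no mutable state, which is exactly the property a read-copy-update pattern relies on.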
1 vendor/github.com/mitchellh/reflectwalk/.travis.yml generated vendored Normal file
@@ -0,0 +1 @@
language: go
21 vendor/github.com/mitchellh/reflectwalk/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2013 Mitchell Hashimoto

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
6 vendor/github.com/mitchellh/reflectwalk/README.md generated vendored Normal file
@@ -0,0 +1,6 @@
# reflectwalk

reflectwalk is a Go library for "walking" a value in Go using reflection,
in the same way a directory tree can be "walked" on the filesystem. Walking
a complex structure can allow you to do manipulations on unknown structures
such as those decoded from JSON.
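To make the README's description concrete, here is a small sketch that implements the StructWalker interface and calls the Walk entry point, both of which are vendored in reflectwalk.go below; the example types here are hypothetical:

package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// fieldLister records the name of every struct field it visits.
type fieldLister struct {
	fields []string
}

func (l *fieldLister) Struct(v reflect.Value) error { return nil }

func (l *fieldLister) StructField(f reflect.StructField, v reflect.Value) error {
	l.fields = append(l.fields, f.Name)
	return nil
}

func main() {
	type daemonConfig struct {
		Debug bool
		Hosts []string
	}

	l := &fieldLister{}
	if err := reflectwalk.Walk(daemonConfig{Debug: true}, l); err != nil {
		panic(err)
	}
	fmt.Println(l.fields) // [Debug Hosts]
}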
19 vendor/github.com/mitchellh/reflectwalk/location.go generated vendored Normal file
@@ -0,0 +1,19 @@
package reflectwalk

//go:generate stringer -type=Location location.go

type Location uint

const (
	None Location = iota
	Map
	MapKey
	MapValue
	Slice
	SliceElem
	Array
	ArrayElem
	Struct
	StructField
	WalkLoc
)
16 vendor/github.com/mitchellh/reflectwalk/location_string.go generated vendored Normal file
@@ -0,0 +1,16 @@
// Code generated by "stringer -type=Location location.go"; DO NOT EDIT.

package reflectwalk

import "fmt"

const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc"

var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73}

func (i Location) String() string {
	if i >= Location(len(_Location_index)-1) {
		return fmt.Sprintf("Location(%d)", i)
	}
	return _Location_name[_Location_index[i]:_Location_index[i+1]]
}
420 vendor/github.com/mitchellh/reflectwalk/reflectwalk.go generated vendored Normal file
@@ -0,0 +1,420 @@
// reflectwalk is a package that allows you to "walk" complex structures
// similar to how you may "walk" a filesystem: visiting every element one
// by one and calling callback functions allowing you to handle and manipulate
// those elements.
package reflectwalk

import (
	"errors"
	"reflect"
)

// PrimitiveWalker implementations are able to handle primitive values
// within complex structures. Primitive values are numbers, strings,
// booleans, funcs, chans.
//
// These primitive values are often members of more complex
// structures (slices, maps, etc.) that are walkable by other interfaces.
type PrimitiveWalker interface {
	Primitive(reflect.Value) error
}

// InterfaceWalker implementations are able to handle interface values as they
// are encountered during the walk.
type InterfaceWalker interface {
	Interface(reflect.Value) error
}

// MapWalker implementations are able to handle individual elements
// found within a map structure.
type MapWalker interface {
	Map(m reflect.Value) error
	MapElem(m, k, v reflect.Value) error
}

// SliceWalker implementations are able to handle slice elements found
// within complex structures.
type SliceWalker interface {
	Slice(reflect.Value) error
	SliceElem(int, reflect.Value) error
}

// ArrayWalker implementations are able to handle array elements found
// within complex structures.
type ArrayWalker interface {
	Array(reflect.Value) error
	ArrayElem(int, reflect.Value) error
}

// StructWalker is an interface that has methods that are called for
// structs when a Walk is done.
type StructWalker interface {
	Struct(reflect.Value) error
	StructField(reflect.StructField, reflect.Value) error
}

// EnterExitWalker implementations are notified before and after
// they walk deeper into complex structures (into struct fields,
// into slice elements, etc.)
type EnterExitWalker interface {
	Enter(Location) error
	Exit(Location) error
}

// PointerWalker implementations are notified when the value they're
// walking is a pointer or not. Pointer is called for _every_ value whether
// it is a pointer or not.
type PointerWalker interface {
	PointerEnter(bool) error
	PointerExit(bool) error
}

// PointerValueWalker implementations are notified with the value of
// a particular pointer when a pointer is walked. Pointer is called
// right before PointerEnter.
type PointerValueWalker interface {
	Pointer(reflect.Value) error
}

// SkipEntry can be returned from walk functions to skip walking
// the value of this field. This is only valid in the following functions:
//
//   - Struct: skips all fields from being walked
//   - StructField: skips walking the struct value
//
var SkipEntry = errors.New("skip this entry")

// Walk takes an arbitrary value and an interface and traverses the
// value, calling callbacks on the interface if they are supported.
// The interface should implement one or more of the walker interfaces
// in this package, such as PrimitiveWalker, StructWalker, etc.
func Walk(data, walker interface{}) (err error) {
	v := reflect.ValueOf(data)
	ew, ok := walker.(EnterExitWalker)
	if ok {
		err = ew.Enter(WalkLoc)
	}

	if err == nil {
		err = walk(v, walker)
	}

	if ok && err == nil {
		err = ew.Exit(WalkLoc)
	}

	return
}

func walk(v reflect.Value, w interface{}) (err error) {
	// Determine if we're receiving a pointer and if so notify the walker.
	// The logic here is convoluted but very important (tests will fail if
	// almost any part is changed). I will try to explain here.
	//
	// First, we check if the value is an interface, if so, we really need
	// to check the interface's VALUE to see whether it is a pointer.
	//
	// Check whether the value is then a pointer. If so, then set pointer
	// to true to notify the user.
	//
	// If we still have a pointer or an interface after the indirections, then
	// we unwrap another level
	//
	// At this time, we also set "v" to be the dereferenced value. This is
	// because once we've unwrapped the pointer we want to use that value.
	pointer := false
	pointerV := v

	for {
		if pointerV.Kind() == reflect.Interface {
			if iw, ok := w.(InterfaceWalker); ok {
				if err = iw.Interface(pointerV); err != nil {
					return
				}
			}

			pointerV = pointerV.Elem()
		}

		if pointerV.Kind() == reflect.Ptr {
			if pw, ok := w.(PointerValueWalker); ok {
				if err = pw.Pointer(pointerV); err != nil {
					if err == SkipEntry {
						// Skip the rest of this entry but clear the error
						return nil
					}

					return
				}
			}

			pointer = true
			v = reflect.Indirect(pointerV)
		}
		if pw, ok := w.(PointerWalker); ok {
			if err = pw.PointerEnter(pointer); err != nil {
				return
			}

			defer func(pointer bool) {
				if err != nil {
					return
				}

				err = pw.PointerExit(pointer)
			}(pointer)
		}

		if pointer {
			pointerV = v
		}
		pointer = false

		// If we still have a pointer or interface we have to indirect another level.
		switch pointerV.Kind() {
		case reflect.Ptr, reflect.Interface:
			continue
		}
		break
	}

	// We preserve the original value here because if it is an interface
	// type, we want to pass that directly into the walkPrimitive, so that
	// we can set it.
	originalV := v
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}

	k := v.Kind()
	if k >= reflect.Int && k <= reflect.Complex128 {
		k = reflect.Int
	}

	switch k {
	// Primitives
	case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid:
		err = walkPrimitive(originalV, w)
		return
	case reflect.Map:
		err = walkMap(v, w)
		return
	case reflect.Slice:
		err = walkSlice(v, w)
		return
	case reflect.Struct:
		err = walkStruct(v, w)
		return
	case reflect.Array:
		err = walkArray(v, w)
		return
	default:
		panic("unsupported type: " + k.String())
	}
}

func walkMap(v reflect.Value, w interface{}) error {
	ew, ewok := w.(EnterExitWalker)
	if ewok {
		ew.Enter(Map)
	}

	if mw, ok := w.(MapWalker); ok {
		if err := mw.Map(v); err != nil {
			return err
		}
	}

	for _, k := range v.MapKeys() {
		kv := v.MapIndex(k)

		if mw, ok := w.(MapWalker); ok {
			if err := mw.MapElem(v, k, kv); err != nil {
				return err
			}
		}

		ew, ok := w.(EnterExitWalker)
		if ok {
			ew.Enter(MapKey)
		}

		if err := walk(k, w); err != nil {
			return err
		}

		if ok {
			ew.Exit(MapKey)
			ew.Enter(MapValue)
		}

		// get the map value again as it may have changed in the MapElem call
		if err := walk(v.MapIndex(k), w); err != nil {
			return err
		}

		if ok {
			ew.Exit(MapValue)
		}
	}

	if ewok {
		ew.Exit(Map)
	}

	return nil
}

func walkPrimitive(v reflect.Value, w interface{}) error {
	if pw, ok := w.(PrimitiveWalker); ok {
		return pw.Primitive(v)
	}

	return nil
}

func walkSlice(v reflect.Value, w interface{}) (err error) {
	ew, ok := w.(EnterExitWalker)
	if ok {
		ew.Enter(Slice)
	}

	if sw, ok := w.(SliceWalker); ok {
		if err := sw.Slice(v); err != nil {
			return err
		}
	}

	for i := 0; i < v.Len(); i++ {
		elem := v.Index(i)

		if sw, ok := w.(SliceWalker); ok {
			if err := sw.SliceElem(i, elem); err != nil {
				return err
			}
		}

		ew, ok := w.(EnterExitWalker)
		if ok {
			ew.Enter(SliceElem)
		}

		if err := walk(elem, w); err != nil {
			return err
		}

		if ok {
			ew.Exit(SliceElem)
		}
	}

	ew, ok = w.(EnterExitWalker)
	if ok {
		ew.Exit(Slice)
	}

	return nil
}

func walkArray(v reflect.Value, w interface{}) (err error) {
	ew, ok := w.(EnterExitWalker)
	if ok {
		ew.Enter(Array)
	}

	if aw, ok := w.(ArrayWalker); ok {
		if err := aw.Array(v); err != nil {
			return err
		}
	}

	for i := 0; i < v.Len(); i++ {
		elem := v.Index(i)

		if aw, ok := w.(ArrayWalker); ok {
			if err := aw.ArrayElem(i, elem); err != nil {
				return err
			}
		}

		ew, ok := w.(EnterExitWalker)
		if ok {
			ew.Enter(ArrayElem)
		}

		if err := walk(elem, w); err != nil {
			return err
		}

		if ok {
			ew.Exit(ArrayElem)
		}
	}

	ew, ok = w.(EnterExitWalker)
	if ok {
		ew.Exit(Array)
	}

	return nil
}

func walkStruct(v reflect.Value, w interface{}) (err error) {
	ew, ewok := w.(EnterExitWalker)
	if ewok {
		ew.Enter(Struct)
	}

	skip := false
	if sw, ok := w.(StructWalker); ok {
		err = sw.Struct(v)
		if err == SkipEntry {
			skip = true
			err = nil
		}
		if err != nil {
			return
		}
	}

	if !skip {
		vt := v.Type()
		for i := 0; i < vt.NumField(); i++ {
			sf := vt.Field(i)
			f := v.FieldByIndex([]int{i})

			if sw, ok := w.(StructWalker); ok {
				err = sw.StructField(sf, f)

				// SkipEntry just pretends this field doesn't even exist
				if err == SkipEntry {
					continue
				}

				if err != nil {
					return
				}
			}

			ew, ok := w.(EnterExitWalker)
			if ok {
				ew.Enter(StructField)
			}

			err = walk(f, w)
			if err != nil {
				return
			}

			if ok {
				ew.Exit(StructField)
			}
		}
	}

	if ewok {
		ew.Exit(Struct)
	}

	return nil
}

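A short usage note on SkipEntry: returning it from Struct or StructField prunes that subtree of the walk, which is how the copystructure walker above skips unexported and ignored fields. A minimal sketch, assuming the interfaces defined above (the skipper type and field names are hypothetical):

package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// secretSkipper counts visited fields but refuses to descend into any
// field named "Secrets" by returning reflectwalk.SkipEntry.
type secretSkipper struct {
	visited int
}

func (s *secretSkipper) Struct(reflect.Value) error { return nil }

func (s *secretSkipper) StructField(f reflect.StructField, v reflect.Value) error {
	if f.Name == "Secrets" {
		return reflectwalk.SkipEntry
	}
	s.visited++
	return nil
}

func main() {
	type payload struct {
		Name    string
		Secrets map[string]string
	}

	s := &secretSkipper{}
	_ = reflectwalk.Walk(payload{Name: "x"}, s)
	fmt.Println(s.visited) // 1
}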
10 vendor/modules.txt vendored
@@ -611,9 +611,15 @@ github.com/miekg/dns
# github.com/mistifyio/go-zfs/v3 v3.0.1
## explicit; go 1.14
github.com/mistifyio/go-zfs/v3
# github.com/mitchellh/copystructure v1.2.0
## explicit; go 1.15
github.com/mitchellh/copystructure
# github.com/mitchellh/hashstructure/v2 v2.0.2
## explicit; go 1.14
github.com/mitchellh/hashstructure/v2
# github.com/mitchellh/reflectwalk v1.0.2
## explicit
github.com/mitchellh/reflectwalk
# github.com/moby/buildkit v0.11.7-0.20230525183624-798ad6b0ce9f
## explicit; go 1.18
github.com/moby/buildkit/api/services/control
@@ -864,6 +870,10 @@ github.com/moby/term/windows
# github.com/morikuni/aec v1.0.0
## explicit
github.com/morikuni/aec
# github.com/onsi/ginkgo/v2 v2.1.4
## explicit; go 1.18
# github.com/onsi/gomega v1.20.1
## explicit; go 1.18
# github.com/opencontainers/go-digest v1.0.0
## explicit; go 1.13
github.com/opencontainers/go-digest