@@ -16,6 +16,7 @@ import (
 	"runtime"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/containerd/containerd"
@@ -84,7 +85,8 @@ type Daemon struct {
 	containersReplica *container.ViewDB
 	execCommands      *container.ExecStore
 	imageService      ImageService
-	configStore       *config.Config
+	configStore       atomic.Pointer[config.Config]
+	configReload      sync.Mutex
 	statsCollector    *stats.Collector
 	defaultLogConfig  containertypes.LogConfig
 	registryService   *registry.Service
@@ -148,20 +150,31 @@ func (daemon *Daemon) StoreHosts(hosts []string) {
 	}
 }
 
+// config returns an immutable snapshot of the current daemon configuration.
+// Multiple calls to this function will return the same pointer until the
+// configuration is reloaded so callers must take care not to modify the
+// returned value.
+//
+// To ensure that the configuration used remains consistent throughout the
+// lifetime of an operation, the configuration pointer should be passed down the
+// call stack, like one would a [context.Context] value. Only the entrypoints
+// for operations, the outermost functions, should call this function.
+func (daemon *Daemon) config() *config.Config {
+	cfg := daemon.configStore.Load()
+	if cfg == nil {
+		return &config.Config{}
+	}
+	return cfg
+}
+
 // HasExperimental returns whether the experimental features of the daemon are enabled or not
 func (daemon *Daemon) HasExperimental() bool {
-	return daemon.configStore != nil && daemon.configStore.Experimental
+	return daemon.config().Experimental
 }
 
 // Features returns the features map from configStore
 func (daemon *Daemon) Features() map[string]bool {
-	daemon.configStore.Lock()
-	defer daemon.configStore.Unlock()
-	f := make(map[string]bool, len(daemon.configStore.Features))
-	for k, v := range daemon.configStore.Features {
-		f[k] = v
-	}
-	return f
+	return daemon.config().Features
 }
 
 // UsesSnapshotter returns true if feature flag to use containerd snapshotter is enabled
@@ -172,17 +185,14 @@ func (daemon *Daemon) UsesSnapshotter() bool {
 // RegistryHosts returns the registry hosts configuration for the host component
 // of a distribution image reference.
 func (daemon *Daemon) RegistryHosts(host string) ([]docker.RegistryHost, error) {
-	daemon.configStore.Lock()
-	serviceOpts := daemon.configStore.ServiceOptions
-	daemon.configStore.Unlock()
-
 	var (
+		conf        = daemon.config()
 		registryKey = "docker.io"
-		mirrors     = make([]string, len(serviceOpts.Mirrors))
+		mirrors     = make([]string, len(conf.Mirrors))
 		m           = map[string]resolverconfig.RegistryConfig{}
 	)
 	// must trim "https://" or "http://" prefix
-	for i, v := range serviceOpts.Mirrors {
+	for i, v := range conf.Mirrors {
 		if uri, err := url.Parse(v); err == nil {
 			v = uri.Host
 		}
@@ -191,7 +201,7 @@ func (daemon *Daemon) RegistryHosts(host string) ([]docker.RegistryHost, error)
 	// set mirrors for default registry
 	m[registryKey] = resolverconfig.RegistryConfig{Mirrors: mirrors}
 
-	for _, v := range serviceOpts.InsecureRegistries {
+	for _, v := range conf.InsecureRegistries {
 		u, err := url.Parse(v)
 		if err != nil && !strings.HasPrefix(v, "http://") && !strings.HasPrefix(v, "https://") {
 			originalErr := err
@@ -237,7 +247,7 @@ type layerAccessor interface {
 	GetLayerByID(cid string) (layer.RWLayer, error)
 }
 
-func (daemon *Daemon) restore() error {
+func (daemon *Daemon) restore(cfg *config.Config) error {
 	var mapLock sync.Mutex
 	containers := make(map[string]*container.Container)
 
@@ -377,7 +387,7 @@ func (daemon *Daemon) restore() error {
 				logger(c).WithError(err).Error("failed to delete task from containerd")
 				return
 			}
-		} else if !daemon.configStore.LiveRestoreEnabled {
+		} else if !cfg.LiveRestoreEnabled {
 			logger(c).Debug("shutting down container considered alive by containerd")
 			if err := daemon.shutdownContainer(c); err != nil && !errdefs.IsNotFound(err) {
 				log.WithError(err).Error("error shutting down container")
@@ -457,7 +467,7 @@ func (daemon *Daemon) restore() error {
 
 		c.ResetRestartManager(false)
 		if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
-			options, err := daemon.buildSandboxOptions(c)
+			options, err := daemon.buildSandboxOptions(cfg, c)
 			if err != nil {
 				logger(c).WithError(err).Warn("failed to build sandbox option to restore container")
 			}
@@ -475,7 +485,7 @@ func (daemon *Daemon) restore() error {
 			// not initialized yet. We will start
 			// it after the cluster is
 			// initialized.
-			if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
+			if cfg.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
 				mapLock.Lock()
 				restartContainers[c] = make(chan struct{})
 				mapLock.Unlock()
@@ -513,7 +523,7 @@ func (daemon *Daemon) restore() error {
 	//
 	// Note that we cannot initialize the network controller earlier, as it
 	// needs to know if there's active sandboxes (running containers).
-	if err = daemon.initNetworkController(activeSandboxes); err != nil {
+	if err = daemon.initNetworkController(cfg, activeSandboxes); err != nil {
 		return fmt.Errorf("Error initializing network controller: %v", err)
 	}
 
@@ -560,7 +570,7 @@ func (daemon *Daemon) restore() error {
 			if err := daemon.prepareMountPoints(c); err != nil {
 				log.WithError(err).Error("failed to prepare mount points for container")
 			}
-			if err := daemon.containerStart(context.Background(), c, "", "", true); err != nil {
+			if err := daemon.containerStart(context.Background(), cfg, c, "", "", true); err != nil {
 				log.WithError(err).Error("failed to start container")
 			}
 			close(chNotify)
@@ -576,7 +586,7 @@ func (daemon *Daemon) restore() error {
 		go func(cid string) {
 			_ = sem.Acquire(context.Background(), 1)
 
-			if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
+			if err := daemon.containerRm(cfg, cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
 				logrus.WithField("container", cid).WithError(err).Error("failed to remove container")
 			}
 
@@ -624,7 +634,7 @@ func (daemon *Daemon) restore() error {
 
 // RestartSwarmContainers restarts any autostart container which has a
 // swarm endpoint.
-func (daemon *Daemon) RestartSwarmContainers() {
+func (daemon *Daemon) RestartSwarmContainers(cfg *config.Config) {
 	ctx := context.Background()
 
 	// parallelLimit is the maximum number of parallel startup jobs that we
@@ -642,7 +652,7 @@ func (daemon *Daemon) RestartSwarmContainers() {
 			// Autostart all the containers which has a
 			// swarm endpoint now that the cluster is
 			// initialized.
-			if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
+			if cfg.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
 				group.Add(1)
 				go func(c *container.Container) {
 					if err := sem.Acquire(ctx, 1); err != nil {
@@ -651,7 +661,7 @@ func (daemon *Daemon) RestartSwarmContainers() {
 						return
 					}
 
-					if err := daemon.containerStart(ctx, c, "", "", true); err != nil {
+					if err := daemon.containerStart(ctx, cfg, c, "", "", true); err != nil {
 						logrus.WithField("container", c.ID).WithError(err).Error("failed to start swarm container")
 					}
 
@@ -735,10 +745,7 @@ func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {
 // IsSwarmCompatible verifies if the current daemon
 // configuration is compatible with the swarm mode
 func (daemon *Daemon) IsSwarmCompatible() error {
-	if daemon.configStore == nil {
-		return nil
-	}
-	return daemon.configStore.IsSwarmCompatible()
+	return daemon.config().IsSwarmCompatible()
 }
 
 // NewDaemon sets up everything for the daemon to be able to service
@@ -800,10 +807,10 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	}
 
 	d := &Daemon{
-		configStore: config,
 		PluginStore: pluginStore,
 		startupDone: make(chan struct{}),
 	}
+	d.configStore.Store(config)
 
 	// TEST_INTEGRATION_USE_SNAPSHOTTER is used for integration tests only.
 	if os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != "" {
@@ -834,12 +841,12 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	}
 	d.setupDumpStackTrap(stackDumpDir)
 
-	if err := d.setupSeccompProfile(); err != nil {
+	if err := d.setupSeccompProfile(config); err != nil {
 		return nil, err
 	}
 
 	// Set the default isolation mode (only applicable on Windows)
-	if err := d.setDefaultIsolation(); err != nil {
+	if err := d.setDefaultIsolation(config); err != nil {
 		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
 	}
 
@@ -881,7 +888,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	d.registryService = registryService
 	dlogger.RegisterPluginGetter(d.PluginStore)
 
-	metricsSockPath, err := d.listenMetricsSock()
+	metricsSockPath, err := d.listenMetricsSock(config)
 	if err != nil {
 		return nil, err
 	}
@@ -942,7 +949,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		shimOpts interface{}
 	)
 	if runtime.GOOS != "windows" {
-		shim, shimOpts, err = d.getRuntime(config.GetDefaultRuntimeName())
+		shim, shimOpts, err = d.getRuntime(config, config.DefaultRuntime)
 		if err != nil {
 			return nil, err
 		}
@@ -965,9 +972,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		return nil, errors.Wrap(err, "couldn't create plugin manager")
 	}
 
-	if err := d.setupDefaultLogConfig(); err != nil {
-		return nil, err
+	d.defaultLogConfig, err = defaultLogConfig(config)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to set log opts")
 	}
+	logrus.Debugf("Using default logging driver %s", d.defaultLogConfig.Type)
 
 	d.volumes, err = volumesservice.NewVolumeService(config.Root, d.PluginStore, rootIDs, d)
 	if err != nil {
@@ -982,7 +991,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	// at this point.
 	//
 	// TODO(thaJeztah) add a utility to only collect the CgroupDevicesEnabled information
-	if runtime.GOOS == "linux" && !userns.RunningInUserNS() && !getSysInfo(d).CgroupDevicesEnabled {
+	if runtime.GOOS == "linux" && !userns.RunningInUserNS() && !getSysInfo(config).CgroupDevicesEnabled {
 		return nil, errors.New("Devices cgroup isn't mounted")
 	}
 
@@ -1135,11 +1144,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 
 	go d.execCommandGC()
 
-	if err := d.initLibcontainerd(ctx); err != nil {
+	if err := d.initLibcontainerd(ctx, config); err != nil {
 		return nil, err
 	}
 
-	if err := d.restore(); err != nil {
+	if err := d.restore(config); err != nil {
 		return nil, err
 	}
 	close(d.startupDone)
@@ -1201,7 +1210,11 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error {
 // A negative (-1) timeout means "indefinitely", which means that containers
 // are not forcibly killed, and the daemon shuts down after all containers exit.
 func (daemon *Daemon) ShutdownTimeout() int {
-	shutdownTimeout := daemon.configStore.ShutdownTimeout
+	return daemon.shutdownTimeout(daemon.config())
+}
+
+func (daemon *Daemon) shutdownTimeout(cfg *config.Config) int {
+	shutdownTimeout := cfg.ShutdownTimeout
 	if shutdownTimeout < 0 {
 		return -1
 	}
@@ -1228,7 +1241,8 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
 	// Keep mounts and networking running on daemon shutdown if
 	// we are to keep containers running and restore them.
 
-	if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
+	cfg := daemon.config()
+	if cfg.LiveRestoreEnabled && daemon.containers != nil {
 		// check if there are any running containers, if none we should do some cleanup
 		if ls, err := daemon.Containers(ctx, &types.ContainerListOptions{}); len(ls) != 0 || err != nil {
 			// metrics plugins still need some cleanup
@@ -1238,8 +1252,8 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
 	}
 
 	if daemon.containers != nil {
-		logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", daemon.configStore.ShutdownTimeout)
-		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.ShutdownTimeout())
+		logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", cfg.ShutdownTimeout)
+		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.shutdownTimeout(cfg))
 		daemon.containers.ApplyAll(func(c *container.Container) {
 			if !c.IsRunning() {
 				return
@@ -1293,7 +1307,7 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error {
 		daemon.mdDB.Close()
 	}
 
-	return daemon.cleanupMounts()
+	return daemon.cleanupMounts(cfg)
 }
 
 // Mount sets container.BaseFS
@@ -1374,15 +1388,10 @@ func isBridgeNetworkDisabled(conf *config.Config) bool {
 	return conf.BridgeConfig.Iface == config.DisableNetworkBridge
 }
 
-func (daemon *Daemon) networkOptions(pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
-	options := []nwconfig.Option{}
-	if daemon.configStore == nil {
-		return options, nil
-	}
-	conf := daemon.configStore
+func (daemon *Daemon) networkOptions(conf *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
 	dd := runconfig.DefaultDaemonNetworkMode()
 
-	options = []nwconfig.Option{
+	options := []nwconfig.Option{
 		nwconfig.OptionDataDir(conf.Root),
 		nwconfig.OptionExecRoot(conf.GetExecRoot()),
 		nwconfig.OptionDefaultDriver(string(dd)),
@@ -1514,7 +1523,7 @@ func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo {
 		// We check if sysInfo is not set here, to allow some test to
 		// override the actual sysInfo.
 		if daemon.sysInfo == nil {
-			daemon.sysInfo = getSysInfo(daemon)
+			daemon.sysInfo = getSysInfo(daemon.config())
 		}
 	})
 
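For reviewers unfamiliar with the pattern this diff adopts: below is a minimal, self-contained Go sketch of the atomic-snapshot scheme described in the new config() doc comment. An atomic.Pointer holds an immutable configuration, callers Load() one snapshot at an operation's entrypoint and pass it down the call stack, and a reload copies the current value, mutates the copy, and publishes it while a mutex serializes concurrent reloads. The Config and Daemon types here are illustrative stand-ins, not moby's actual definitions.

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	// Config is a hypothetical stand-in for the daemon's configuration.
	type Config struct {
		Debug bool
	}

	type Daemon struct {
		configStore  atomic.Pointer[Config] // current immutable snapshot
		configReload sync.Mutex             // serializes reloads only; readers never block
	}

	// config returns the current snapshot; callers must not mutate it.
	func (d *Daemon) config() *Config {
		if c := d.configStore.Load(); c != nil {
			return c
		}
		return &Config{}
	}

	// reload copies the current snapshot, applies mutate to the copy, and
	// publishes the result atomically. Readers holding the old pointer keep
	// a consistent view for the rest of their operation.
	func (d *Daemon) reload(mutate func(*Config)) {
		d.configReload.Lock()
		defer d.configReload.Unlock()
		next := *d.config() // shallow copy is enough for this flat struct
		mutate(&next)
		d.configStore.Store(&next)
	}

	func main() {
		d := &Daemon{}
		d.configStore.Store(&Config{})

		cfg := d.config() // snapshot taken once at the entrypoint
		d.reload(func(c *Config) { c.Debug = true })

		// The earlier snapshot is unchanged; new callers see the new value.
		fmt.Println(cfg.Debug, d.config().Debug) // false true
	}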