// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.19

package daemon // import "github.com/docker/docker/daemon"

import (
	"context"
	"errors"
	"fmt"
	"net"
	"os"
	"path"
	"strings"
	"time"

	"github.com/containerd/log"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/events"
	networktypes "github.com/docker/docker/api/types/network"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/config"
	"github.com/docker/docker/daemon/network"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/internal/multierror"
	"github.com/docker/docker/internal/sliceutil"
	"github.com/docker/docker/libnetwork"
	"github.com/docker/docker/libnetwork/netlabel"
	"github.com/docker/docker/libnetwork/options"
	"github.com/docker/docker/libnetwork/scope"
	"github.com/docker/docker/libnetwork/types"
	"github.com/docker/docker/opts"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/runconfig"
	"github.com/docker/go-connections/nat"
)

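// ipAddresses returns the string representation of each IP address in ips,
// in the same order.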
func ipAddresses(ips []net.IP) []string {
	var addrs []string
	for _, ip := range ips {
		addrs = append(addrs, ip.String())
	}
	return addrs
}

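// buildSandboxOptions translates the container's configuration (hostname and
// domain name, DNS settings, extra hosts, port bindings, and legacy links on
// the default bridge network) into the libnetwork.SandboxOption slice used to
// create or refresh its network sandbox.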
func (daemon *Daemon) buildSandboxOptions(cfg *config.Config, container *container.Container) ([]libnetwork.SandboxOption, error) {
	var sboxOptions []libnetwork.SandboxOption
	sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname), libnetwork.OptionDomainname(container.Config.Domainname))

	if container.HostConfig.NetworkMode.IsHost() {
		sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox())
	} else {
		// OptionUseExternalKey is mandatory for userns support; it is
		// optional when user namespaces are not used.
		sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey())
	}

	// Add platform-specific Sandbox options.
	if err := buildSandboxPlatformOptions(container, cfg, &sboxOptions); err != nil {
		return nil, err
	}

	if len(container.HostConfig.DNS) > 0 {
		sboxOptions = append(sboxOptions, libnetwork.OptionDNS(container.HostConfig.DNS))
	} else if len(cfg.DNS) > 0 {
		sboxOptions = append(sboxOptions, libnetwork.OptionDNS(ipAddresses(cfg.DNS)))
	}
	if len(container.HostConfig.DNSSearch) > 0 {
		sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(container.HostConfig.DNSSearch))
	} else if len(cfg.DNSSearch) > 0 {
		sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(cfg.DNSSearch))
	}
	if len(container.HostConfig.DNSOptions) > 0 {
		sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(container.HostConfig.DNSOptions))
	} else if len(cfg.DNSOptions) > 0 {
		sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(cfg.DNSOptions))
	}

	for _, extraHost := range container.HostConfig.ExtraHosts {
		// allow IPv6 addresses in extra hosts; only split on first ":"
		if _, err := opts.ValidateExtraHost(extraHost); err != nil {
			return nil, err
		}
		host, ip, _ := strings.Cut(extraHost, ":")
		// If the IP address is the special value "host-gateway", replace it
		// with the IP address stored in the daemon-level HostGatewayIP
		// config variable.
		if ip == opts.HostGatewayName {
			gateway := cfg.HostGatewayIP.String()
			if gateway == "" {
				return nil, fmt.Errorf("unable to derive the IP value for host-gateway")
			}
			ip = gateway
		}
		sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(host, ip))
	}

	bindings := make(nat.PortMap)
	if container.HostConfig.PortBindings != nil {
		for p, b := range container.HostConfig.PortBindings {
			bindings[p] = []nat.PortBinding{}
			for _, bb := range b {
				bindings[p] = append(bindings[p], nat.PortBinding{
					HostIP:   bb.HostIP,
					HostPort: bb.HostPort,
				})
			}
		}
	}

	// TODO(thaJeztah): Move this code to a method on nat.PortSet.
	ports := make([]nat.Port, 0, len(container.Config.ExposedPorts))
	for p := range container.Config.ExposedPorts {
		ports = append(ports, p)
	}
	nat.SortPortMap(ports, bindings)

	var (
		publishedPorts []types.PortBinding
		exposedPorts   []types.TransportPort
	)
	for _, port := range ports {
		portProto := types.ParseProtocol(port.Proto())
		portNum := uint16(port.Int())
		exposedPorts = append(exposedPorts, types.TransportPort{
			Proto: portProto,
			Port:  portNum,
		})

		for _, binding := range bindings[port] {
			newP, err := nat.NewPort(nat.SplitProtoPort(binding.HostPort))
			var portStart, portEnd int
			if err == nil {
				portStart, portEnd, err = newP.Range()
			}
			if err != nil {
				return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding.HostPort, err)
			}
			publishedPorts = append(publishedPorts, types.PortBinding{
				Proto:       portProto,
				Port:        portNum,
				HostIP:      net.ParseIP(binding.HostIP),
				HostPort:    uint16(portStart),
				HostPortEnd: uint16(portEnd),
			})
		}

		if container.HostConfig.PublishAllPorts && len(bindings[port]) == 0 {
			publishedPorts = append(publishedPorts, types.PortBinding{
				Proto: portProto,
				Port:  portNum,
			})
		}
	}

	sboxOptions = append(sboxOptions, libnetwork.OptionPortMapping(publishedPorts), libnetwork.OptionExposedPorts(exposedPorts))

	// Legacy links are supported only on the default bridge network, and only
	// for containers created with "docker run --link". If this container is
	// not connected to the default bridge network, there is nothing more to do.
	defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName()
	bridgeSettings, ok := container.NetworkSettings.Networks[defaultNetName]
	if !ok || bridgeSettings.EndpointSettings == nil || bridgeSettings.EndpointID == "" {
		return sboxOptions, nil
	}

	var (
		childEndpoints []string
		cEndpointID    string
	)
	for linkAlias, child := range daemon.children(container) {
		if !isLinkable(child) {
			return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name)
		}
		_, alias := path.Split(linkAlias)
		// allow access to the linked container via the alias, real name, and container hostname
		aliasList := alias + " " + child.Config.Hostname
		// only add the name if alias isn't equal to the name
		if alias != child.Name[1:] {
			aliasList = aliasList + " " + child.Name[1:]
		}
		defaultNW := child.NetworkSettings.Networks[defaultNetName]
		if defaultNW.IPAddress != "" {
			sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, defaultNW.IPAddress))
		}
		if defaultNW.GlobalIPv6Address != "" {
			sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, defaultNW.GlobalIPv6Address))
		}
		cEndpointID = defaultNW.EndpointID
		if cEndpointID != "" {
			childEndpoints = append(childEndpoints, cEndpointID)
		}
	}

	var parentEndpoints []string
	for alias, parent := range daemon.parents(container) {
		if cfg.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() {
			continue
		}

		_, alias = path.Split(alias)
		log.G(context.TODO()).Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress)
		sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate(parent.ID, alias, bridgeSettings.IPAddress))
		if cEndpointID != "" {
			parentEndpoints = append(parentEndpoints, cEndpointID)
		}
	}

	sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(options.Generic{
		netlabel.GenericData: options.Generic{
			"ParentEndpoints": parentEndpoints,
			"ChildEndpoints":  childEndpoints,
		},
	}))
	return sboxOptions, nil
}

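// updateNetworkSettings registers endpointConfig for network n in the
// container's NetworkSettings, after checking that the new attachment does
// not conflict with the networks the container is already connected to.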
func (daemon *Daemon) updateNetworkSettings(container *container.Container, n *libnetwork.Network, endpointConfig *networktypes.EndpointSettings) error {
	if container.NetworkSettings == nil {
		container.NetworkSettings = &network.Settings{}
	}
	if container.NetworkSettings.Networks == nil {
		container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings)
	}

	if !container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() {
		return runconfig.ErrConflictHostNetwork
	}

	for s, v := range container.NetworkSettings.Networks {
		sn, err := daemon.FindNetwork(getNetworkID(s, v.EndpointSettings))
		if err != nil {
			continue
		}

		if sn.Name() == n.Name() {
			// If the network scope is swarm, then this
			// is an attachable network, which may not
			// be locally available previously.
			// So always update.
			if n.Scope() == scope.Swarm {
				continue
			}
			// Avoid duplicate config
			return nil
		}
		if !containertypes.NetworkMode(sn.Type()).IsPrivate() ||
			!containertypes.NetworkMode(n.Type()).IsPrivate() {
			return runconfig.ErrConflictSharedNetwork
		}
		if containertypes.NetworkMode(sn.Name()).IsNone() ||
			containertypes.NetworkMode(n.Name()).IsNone() {
			return runconfig.ErrConflictNoNetwork
		}
	}

	container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{
		EndpointSettings: endpointConfig,
	}

	return nil
}

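// updateEndpointNetworkSettings updates the container's NetworkSettings from
// the endpoint's information (via buildEndpointInfo) and records the bridge
// interface name when the container uses the default network mode.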
func (daemon *Daemon) updateEndpointNetworkSettings(cfg *config.Config, container *container.Container, n *libnetwork.Network, ep *libnetwork.Endpoint) error {
	if err := buildEndpointInfo(container.NetworkSettings, n, ep); err != nil {
		return err
	}

	if container.HostConfig.NetworkMode == runconfig.DefaultDaemonNetworkMode() {
		container.NetworkSettings.Bridge = cfg.BridgeConfig.Iface
	}

	return nil
}

// updateNetwork is used to update the container's network (e.g. when linked
// containers get removed/unlinked).
func (daemon *Daemon) updateNetwork(cfg *config.Config, container *container.Container) error {
	var (
		start = time.Now()
		ctrl  = daemon.netController
		sid   = container.NetworkSettings.SandboxID
	)

	sb, err := ctrl.SandboxByID(sid)
	if err != nil {
		return fmt.Errorf("error locating sandbox id %s: %v", sid, err)
	}

	// Find out whether the container is connected to the default bridge network.
	var n *libnetwork.Network
	for name, v := range container.NetworkSettings.Networks {
		sn, err := daemon.FindNetwork(getNetworkID(name, v.EndpointSettings))
		if err != nil {
			continue
		}
		if sn.Name() == runconfig.DefaultDaemonNetworkMode().NetworkName() {
			n = sn
			break
		}
	}

	if n == nil {
		// Not connected to the default bridge network; nothing to do.
		return nil
	}

	sbOptions, err := daemon.buildSandboxOptions(cfg, container)
	if err != nil {
		return fmt.Errorf("Update network failed: %v", err)
	}

	if err := sb.Refresh(sbOptions...); err != nil {
		return fmt.Errorf("Update network failed: Failure in refresh sandbox %s: %v", sid, err)
	}

	networkActions.WithValues("update").UpdateSince(start)

	return nil
}

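// findAndAttachNetwork looks up the network identified by idOrName and, for
// dynamically created (swarm attachable) networks, asks the cluster manager
// to attach this node to it, retrying the lookup a few times if the network
// disappears while the attachment is in flight.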
func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrName string, epConfig *networktypes.EndpointSettings) (*libnetwork.Network, *networktypes.NetworkingConfig, error) {
	id := getNetworkID(idOrName, epConfig)

	n, err := daemon.FindNetwork(id)
	if err != nil {
		// We should always be able to find the network for a managed container.
		if container.Managed {
			return nil, nil, err
		}
	}

	// If we found a network and if it is not dynamically created
	// we should never attempt to attach to that network here.
	if n != nil {
		if container.Managed || !n.Dynamic() {
			return n, nil, nil
		}
		// Throw an error if the container is already attached to the network
		if container.NetworkSettings.Networks != nil {
			networkName := n.Name()
			containerName := strings.TrimPrefix(container.Name, "/")
			if nw, ok := container.NetworkSettings.Networks[networkName]; ok && nw.EndpointID != "" {
				err := fmt.Errorf("%s is already attached to network %s", containerName, networkName)
				return n, nil, errdefs.Conflict(err)
			}
		}
	}

	var addresses []string
	if epConfig != nil && epConfig.IPAMConfig != nil {
		if epConfig.IPAMConfig.IPv4Address != "" {
			addresses = append(addresses, epConfig.IPAMConfig.IPv4Address)
		}
		if epConfig.IPAMConfig.IPv6Address != "" {
			addresses = append(addresses, epConfig.IPAMConfig.IPv6Address)
		}
	}

	if n == nil && daemon.attachableNetworkLock != nil {
		daemon.attachableNetworkLock.Lock(id)
		defer daemon.attachableNetworkLock.Unlock(id)
	}

	retryCount := 0
	var nwCfg *networktypes.NetworkingConfig
	for {
		// In all other cases, attempt to attach to the network to
		// trigger attachment in the swarm cluster manager.
		if daemon.clusterProvider != nil {
			var err error
			nwCfg, err = daemon.clusterProvider.AttachNetwork(id, container.ID, addresses)
			if err != nil {
				return nil, nil, err
			}
		}

		n, err = daemon.FindNetwork(id)
		if err != nil {
			if daemon.clusterProvider != nil {
				if err := daemon.clusterProvider.DetachNetwork(id, container.ID); err != nil {
					log.G(context.TODO()).Warnf("Could not rollback attachment for container %s to network %s: %v", container.ID, idOrName, err)
				}
			}

			// Retry the network attach if we failed to find the network after
			// a successful attachment: the only reason that can happen is that
			// some other container attached to the swarm-scope network went
			// down and removed the network while we were in the process of
			// attaching.
			if nwCfg != nil {
				if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok {
					if retryCount >= 5 {
						return nil, nil, fmt.Errorf("could not find network %s after successful attachment", idOrName)
					}
					retryCount++
					continue
				}
			}

			return nil, nil, err
		}

		break
	}

	// This container has an attachment to a swarm-scope network.
	// Update the container network settings accordingly.
	container.NetworkSettings.HasSwarmEndpoint = true
	return n, nwCfg, nil
}

// updateContainerNetworkSettings updates the container's NetworkSettings from
// its HostConfig network mode and the given per-network endpoints configuration.
func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) {
	var n *libnetwork.Network

	mode := container.HostConfig.NetworkMode
	if container.Config.NetworkDisabled || mode.IsContainer() {
		return
	}

	networkName := mode.NetworkName()

	if mode.IsUserDefined() {
		var err error

		n, err = daemon.FindNetwork(networkName)
		if err == nil {
			networkName = n.Name()
		}
	}

	if container.NetworkSettings == nil {
		container.NetworkSettings = &network.Settings{}
	}

	if len(endpointsConfig) > 0 {
		if container.NetworkSettings.Networks == nil {
			container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings)
		}

		for name, epConfig := range endpointsConfig {
			container.NetworkSettings.Networks[name] = &network.EndpointSettings{
				EndpointSettings: epConfig,
				// At this point, during container creation, epConfig.MacAddress is the
				// configured value from the API. If there is no configured value, the
				// same field will later be used to store a generated MAC address. So,
				// remember the requested address now.
				DesiredMacAddress: epConfig.MacAddress,
			}
		}
	}

	if container.NetworkSettings.Networks == nil {
		container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings)
		container.NetworkSettings.Networks[networkName] = &network.EndpointSettings{
			EndpointSettings: &networktypes.EndpointSettings{},
		}
	}

	if !mode.IsUserDefined() {
		return
	}
	// Make sure to internally store the per network endpoint config by network name
	if _, ok := container.NetworkSettings.Networks[networkName]; ok {
		return
	}

	if n != nil {
		if nwConfig, ok := container.NetworkSettings.Networks[n.ID()]; ok {
			container.NetworkSettings.Networks[networkName] = nwConfig
			delete(container.NetworkSettings.Networks, n.ID())
			return
		}
	}
}

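// allocateNetwork sets up networking for the container before it starts: it
// destroys any stale sandbox left over from an ungraceful daemon shutdown,
// connects the container to its configured networks (the default bridge
// network first, because legacy links depend on it), and creates an empty
// sandbox when the container is not attached to any network.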
func (daemon *Daemon) allocateNetwork(cfg *config.Config, container *container.Container) (retErr error) {
	if daemon.netController == nil {
		return nil
	}

	start := time.Now()

	// Cleanup any stale sandbox left over due to ungraceful daemon shutdown
	if err := daemon.netController.SandboxDestroy(container.ID); err != nil {
		log.G(context.TODO()).WithError(err).Errorf("failed to clean up stale network sandbox for container %s", container.ID)
	}

	if container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() {
		return nil
	}

	updateSettings := false
	if len(container.NetworkSettings.Networks) == 0 {
		daemon.updateContainerNetworkSettings(container, nil)
		updateSettings = true
	}

	// Always connect to the default network first: only the default network
	// mode supports legacy links, and the link-related sandbox settings can
	// only be applied when the sandbox is initialized, which happens on the
	// first network connection.
	defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName()
	if nConf, ok := container.NetworkSettings.Networks[defaultNetName]; ok {
		cleanOperationalData(nConf)
		if err := daemon.connectToNetwork(cfg, container, defaultNetName, nConf, updateSettings); err != nil {
			return err
		}
	}

	// the intermediate map is necessary because "connectToNetwork" modifies "container.NetworkSettings.Networks"
	networks := make(map[string]*network.EndpointSettings)
	for n, epConf := range container.NetworkSettings.Networks {
		if n == defaultNetName {
			continue
		}

		networks[n] = epConf
	}

	for netName, epConf := range networks {
		cleanOperationalData(epConf)
		if err := daemon.connectToNetwork(cfg, container, netName, epConf, updateSettings); err != nil {
			return err
		}
	}

	// If the container is not to be connected to any network,
	// create its network sandbox now if not present
	if len(networks) == 0 {
		if _, err := daemon.netController.GetSandbox(container.ID); err != nil {
			if !errdefs.IsNotFound(err) {
				return err
			}

			sbOptions, err := daemon.buildSandboxOptions(cfg, container)
			if err != nil {
				return err
			}
			sb, err := daemon.netController.NewSandbox(container.ID, sbOptions...)
			if err != nil {
				return err
			}
			setNetworkSandbox(container, sb)
			defer func() {
				if retErr != nil {
					sb.Delete()
				}
			}()
		}
	}

	if _, err := container.WriteHostConfig(); err != nil {
		return err
	}
	networkActions.WithValues("allocate").UpdateSince(start)
	return nil
}

// validateEndpointSettings checks whether the given epConfig is valid. The nw parameter can be nil, in which case it
// won't try to check if the endpoint IP addresses are within the network's subnets.
func validateEndpointSettings(nw *libnetwork.Network, nwName string, epConfig *networktypes.EndpointSettings) error {
	if epConfig == nil {
		return nil
	}

	ipamConfig := &networktypes.EndpointIPAMConfig{}
	if epConfig.IPAMConfig != nil {
		ipamConfig = epConfig.IPAMConfig
	}

	var errs []error

	// TODO(aker): move this into api/types/network/endpoint.go once enableIPOnPredefinedNetwork and
	// serviceDiscoveryOnDefaultNetwork are removed.
	if !containertypes.NetworkMode(nwName).IsUserDefined() {
		hasStaticAddresses := ipamConfig.IPv4Address != "" || ipamConfig.IPv6Address != ""
		// On Linux, a user-specified IP address is accepted only by networks with user-specified subnets.
		if hasStaticAddresses && !enableIPOnPredefinedNetwork() {
			errs = append(errs, runconfig.ErrUnsupportedNetworkAndIP)
		}
		if len(epConfig.Aliases) > 0 && !serviceDiscoveryOnDefaultNetwork() {
			errs = append(errs, runconfig.ErrUnsupportedNetworkAndAlias)
		}
	}

	// TODO(aker): add a proper multierror.Append
	if err := ipamConfig.Validate(); err != nil {
		errs = append(errs, err.(interface{ Unwrap() []error }).Unwrap()...)
	}

	if nw != nil {
		_, _, v4Configs, v6Configs := nw.IpamConfig()

		var nwIPv4Subnets, nwIPv6Subnets []networktypes.NetworkSubnet
		for _, nwIPAMConfig := range v4Configs {
			nwIPv4Subnets = append(nwIPv4Subnets, nwIPAMConfig)
		}
		for _, nwIPAMConfig := range v6Configs {
			nwIPv6Subnets = append(nwIPv6Subnets, nwIPAMConfig)
		}

		// TODO(aker): add a proper multierror.Append
		if err := ipamConfig.IsInRange(nwIPv4Subnets, nwIPv6Subnets); err != nil {
			errs = append(errs, err.(interface{ Unwrap() []error }).Unwrap()...)
		}
	}

	if epConfig.MacAddress != "" {
		_, err := net.ParseMAC(epConfig.MacAddress)
		if err != nil {
			return fmt.Errorf("invalid MAC address %s", epConfig.MacAddress)
		}
	}

	if err := multierror.Join(errs...); err != nil {
		return fmt.Errorf("invalid endpoint settings:\n%w", err)
	}

	return nil
}

// cleanOperationalData resets the operational data from the passed endpoint settings
func cleanOperationalData(es *network.EndpointSettings) {
	es.EndpointID = ""
	es.Gateway = ""
	es.IPAddress = ""
	es.IPPrefixLen = 0
	es.IPv6Gateway = ""
	es.GlobalIPv6Address = ""
	es.GlobalIPv6PrefixLen = 0
	es.MacAddress = ""
	if es.IPAMOperational {
		es.IPAMConfig = nil
	}
}

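// updateNetworkConfig fills in the DNS names for the endpoint, validates the
// endpoint settings against the target network, and (when updateSettings is
// true) records the endpoint configuration in the container's NetworkSettings.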
func (daemon *Daemon) updateNetworkConfig(container *container.Container, n *libnetwork.Network, endpointConfig *networktypes.EndpointSettings, updateSettings bool) error {
	// Set up DNS names for a user defined network, and for the default 'nat'
	// network on Windows (IsBridge() returns true for nat).
	if containertypes.NetworkMode(n.Name()).IsUserDefined() ||
		(serviceDiscoveryOnDefaultNetwork() && containertypes.NetworkMode(n.Name()).IsBridge()) {
		endpointConfig.DNSNames = buildEndpointDNSNames(container, endpointConfig.Aliases)
	}

	if err := validateEndpointSettings(n, n.Name(), endpointConfig); err != nil {
		return errdefs.InvalidParameter(err)
	}

	if updateSettings {
		if err := daemon.updateNetworkSettings(container, n, endpointConfig); err != nil {
			return err
		}
	}
	return nil
}

// buildEndpointDNSNames constructs the list of DNSNames that should be assigned to a given endpoint. The order within
// the returned slice is important, as the first entry will be used to generate the PTR records (for IPv4 and IPv6)
// associated with this endpoint.
func buildEndpointDNSNames(ctr *container.Container, aliases []string) []string {
	var dnsNames []string

	if ctr.Name != "" {
		dnsNames = append(dnsNames, strings.TrimPrefix(ctr.Name, "/"))
	}

	dnsNames = append(dnsNames, aliases...)

	if ctr.ID != "" {
		dnsNames = append(dnsNames, stringid.TruncateID(ctr.ID))
	}

	if ctr.Config.Hostname != "" {
		dnsNames = append(dnsNames, ctr.Config.Hostname)
	}

	return sliceutil.Dedup(dnsNames)
}

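// connectToNetwork connects the container to the network identified by
// idOrName, creating a default endpoint configuration when none is supplied
// and merging in the endpoint settings returned by a swarm network attachment.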
func (daemon *Daemon) connectToNetwork(cfg *config.Config, container *container.Container, idOrName string, endpointConfig *network.EndpointSettings, updateSettings bool) (retErr error) {
|
2016-07-20 23:11:28 +00:00
|
|
|
start := time.Now()
|
2016-08-23 23:50:15 +00:00
|
|
|
if container.HostConfig.NetworkMode.IsContainer() {
|
|
|
|
return runconfig.ErrConflictSharedNetwork
|
|
|
|
}
|
2023-07-23 21:58:31 +00:00
|
|
|
if cfg.DisableBridge && containertypes.NetworkMode(idOrName).IsBridge() {
|
2016-08-23 23:50:15 +00:00
|
|
|
container.Config.NetworkDisabled = true
|
|
|
|
return nil
|
|
|
|
}
|
2016-04-08 22:25:07 +00:00
|
|
|
if endpointConfig == nil {
|
Only restore a configured MAC addr on restart.
The API's EndpointConfig struct has a MacAddress field that's used for
both the configured address, and the current address (which may be generated).
A configured address must be restored when a container is restarted, but a
generated address must not.
The previous attempt to differentiate between the two, without adding a field
to the API's EndpointConfig that would show up in 'inspect' output, was a
field in the daemon's version of EndpointSettings, MACOperational. It did
not work, MACOperational was set to true when a configured address was
used. So, while it ensured addresses were regenerated, it failed to preserve
a configured address.
So, this change removes that code, and adds DesiredMacAddress to the wrapped
version of EndpointSettings, where it is persisted but does not appear in
'inspect' results. Its value is copied from MacAddress (the API field) when
a container is created.
Signed-off-by: Rob Murray <rob.murray@docker.com>
2024-01-26 18:00:32 +00:00
|
|
|
endpointConfig = &network.EndpointSettings{
|
|
|
|
EndpointSettings: &networktypes.EndpointSettings{},
|
|
|
|
}
|
2016-04-08 22:25:07 +00:00
|
|
|
}
|
2016-08-23 23:50:15 +00:00
|
|
|
|
2024-01-26 18:00:32 +00:00
|
|
|
n, nwCfg, err := daemon.findAndAttachNetwork(container, idOrName, endpointConfig.EndpointSettings)
|
2016-03-10 04:33:21 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if n == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2023-07-23 21:58:31 +00:00
|
|
|
nwName := n.Name()
|
2016-03-10 04:33:21 +00:00
|
|
|
|
2023-09-06 15:50:56 +00:00
|
|
|
if idOrName != container.HostConfig.NetworkMode.NetworkName() {
|
|
|
|
if err := daemon.normalizeNetMode(container); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-01-26 18:00:32 +00:00
|
|
|
endpointConfig.IPAMOperational = false
|
2023-07-03 21:49:57 +00:00
|
|
|
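// nwCfg is the networking config returned by findAndAttachNetwork, if any.
// When it carries endpoint settings for this network, copy their IPAMConfig
// and NetworkID, and flag the IPAM data as operational unless the caller
// supplied static addresses.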
if nwCfg != nil {
|
2023-07-23 21:58:31 +00:00
|
|
|
if epConfig, ok := nwCfg.EndpointsConfig[nwName]; ok {
|
|
|
|
if endpointConfig.IPAMConfig == nil || (endpointConfig.IPAMConfig.IPv4Address == "" && endpointConfig.IPAMConfig.IPv6Address == "" && len(endpointConfig.IPAMConfig.LinkLocalIPs) == 0) {
|
2024-01-26 18:00:32 +00:00
|
|
|
endpointConfig.IPAMOperational = true
|
2016-08-23 23:50:15 +00:00
|
|
|
}
|
|
|
|
|
2016-10-29 22:02:15 +00:00
|
|
|
// copy IPAMConfig and NetworkID from epConfig via AttachNetwork
|
|
|
|
endpointConfig.IPAMConfig = epConfig.IPAMConfig
|
|
|
|
endpointConfig.NetworkID = epConfig.NetworkID
|
2016-08-23 23:50:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-01-26 18:00:32 +00:00
|
|
|
if err := daemon.updateNetworkConfig(container, n, endpointConfig.EndpointSettings, updateSettings); err != nil {
|
2016-08-23 23:50:15 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2023-08-12 12:38:43 +00:00
|
|
|
// TODO(thaJeztah): should this fail early if no sandbox was found?
|
|
|
|
sb, _ := daemon.netController.GetSandbox(container.ID)
|
2023-11-13 11:22:04 +00:00
|
|
|
createOptions, err := buildCreateEndpointOptions(container, n, endpointConfig, sb, ipAddresses(cfg.DNS))
|
2016-03-10 04:33:21 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
endpointName := strings.TrimPrefix(container.Name, "/")
|
|
|
|
ep, err := n.CreateEndpoint(endpointName, createOptions...)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer func() {
|
2023-08-12 11:53:29 +00:00
|
|
|
if retErr != nil {
|
|
|
|
if err := ep.Delete(false); err != nil {
|
2023-06-23 00:33:17 +00:00
|
|
|
log.G(context.TODO()).Warnf("Could not rollback container connection to network %s", idOrName)
|
2016-03-10 04:33:21 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
2024-01-26 18:00:32 +00:00
|
|
|
container.NetworkSettings.Networks[nwName] = endpointConfig
|
2019-10-13 20:10:24 +00:00
|
|
|
|
|
|
|
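// Drop any entry stored under the network ID so the network is tracked only under its name.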
delete(container.NetworkSettings.Networks, n.ID())
|
2016-03-10 04:33:21 +00:00
|
|
|
|
2022-08-17 21:13:49 +00:00
|
|
|
if err := daemon.updateEndpointNetworkSettings(cfg, container, n, ep); err != nil {
|
2016-03-10 04:33:21 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if sb == nil {
|
2022-08-17 21:13:49 +00:00
|
|
|
sbOptions, err := daemon.buildSandboxOptions(cfg, container)
|
2016-03-10 04:33:21 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2023-07-23 21:58:31 +00:00
|
|
|
sb, err = daemon.netController.NewSandbox(container.ID, sbOptions...)
|
2016-03-10 04:33:21 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2023-08-12 11:48:41 +00:00
|
|
|
setNetworkSandbox(container, sb)
|
2016-03-10 04:33:21 +00:00
|
|
|
}
|
|
|
|
|
2018-05-10 20:44:09 +00:00
|
|
|
joinOptions, err := buildJoinOptions(container.NetworkSettings, n)
|
2016-03-10 04:33:21 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := ep.Join(sb, joinOptions...); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-09-18 06:30:39 +00:00
|
|
|
if !container.Managed {
|
|
|
|
// add container name/alias to DNS
|
|
|
|
if err := daemon.ActivateContainerServiceBinding(container.Name); err != nil {
|
|
|
|
return fmt.Errorf("Activate container service binding for %s failed: %v", container.Name, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-10 20:44:09 +00:00
|
|
|
if err := updateJoinInfo(container.NetworkSettings, n, ep); err != nil {
|
2016-03-10 04:33:21 +00:00
|
|
|
return fmt.Errorf("Updating join info failed: %v", err)
|
|
|
|
}
|
|
|
|
|
2016-05-25 20:47:38 +00:00
|
|
|
container.NetworkSettings.Ports = getPortMapInfo(sb)
|
|
|
|
|
2023-08-26 13:24:46 +00:00
|
|
|
daemon.LogNetworkEventWithAttributes(n, events.ActionConnect, map[string]string{"container": container.ID})
|
2016-07-20 23:11:28 +00:00
|
|
|
networkActions.WithValues("connect").UpdateSince(start)
|
2016-03-10 04:33:21 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-07-21 22:38:57 +00:00
|
|
|
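// updateJoinInfo refreshes the container's port mappings and gateway
// addresses from the endpoint after it has joined the sandbox.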
func updateJoinInfo(networkSettings *network.Settings, n *libnetwork.Network, ep *libnetwork.Endpoint) error {
|
2018-05-10 20:44:09 +00:00
|
|
|
if ep == nil {
|
|
|
|
return errors.New("invalid enppoint whhile building portmap info")
|
|
|
|
}
|
|
|
|
|
|
|
|
if networkSettings == nil {
|
|
|
|
return errors.New("invalid network settings while building port map info")
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(networkSettings.Ports) == 0 {
|
|
|
|
pm, err := getEndpointPortMapInfo(ep)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
networkSettings.Ports = pm
|
|
|
|
}
|
|
|
|
|
|
|
|
epInfo := ep.Info()
|
|
|
|
if epInfo == nil {
|
|
|
|
// It is not an error to get an empty endpoint info
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if epInfo.Gateway() != nil {
|
|
|
|
networkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String()
|
|
|
|
}
|
|
|
|
if epInfo.GatewayIPv6().To16() != nil {
|
|
|
|
networkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String()
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-08-26 20:08:28 +00:00
|
|
|
// ForceEndpointDelete deletes an endpoint from a network forcefully
|
|
|
|
func (daemon *Daemon) ForceEndpointDelete(name string, networkName string) error {
|
2018-01-15 17:26:43 +00:00
|
|
|
n, err := daemon.FindNetwork(networkName)
|
2016-08-26 20:08:28 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-03-10 04:33:21 +00:00
|
|
|
ep, err := n.EndpointByName(name)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return ep.Delete(true)
|
|
|
|
}
|
|
|
|
|
2023-07-21 22:38:57 +00:00
|
|
|
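// disconnectFromNetwork detaches the container from network n: its endpoint
// leaves the sandbox, is deleted, and the network is removed from the
// container's settings. With force set, a dangling endpoint named after the
// container is deleted even when no sandbox is attached.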
func (daemon *Daemon) disconnectFromNetwork(container *container.Container, n *libnetwork.Network, force bool) error {
|
2016-03-10 04:33:21 +00:00
|
|
|
var (
|
2023-01-12 01:42:24 +00:00
|
|
|
ep *libnetwork.Endpoint
|
2023-01-12 01:10:09 +00:00
|
|
|
sbox *libnetwork.Sandbox
|
2016-03-10 04:33:21 +00:00
|
|
|
)
|
2023-07-23 21:58:31 +00:00
|
|
|
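// Find the endpoint on this network whose sandbox belongs to the container.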
n.WalkEndpoints(func(current *libnetwork.Endpoint) bool {
|
2016-03-10 04:33:21 +00:00
|
|
|
epInfo := current.Info()
|
|
|
|
if epInfo == nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if sb := epInfo.Sandbox(); sb != nil {
|
|
|
|
if sb.ContainerID() == container.ID {
|
|
|
|
ep = current
|
|
|
|
sbox = sb
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
2023-07-23 21:58:31 +00:00
|
|
|
})
|
2016-03-10 04:33:21 +00:00
|
|
|
|
|
|
|
if ep == nil {
|
2023-07-23 21:58:31 +00:00
|
|
|
if force {
|
|
|
|
var err error
|
|
|
|
ep, err = n.EndpointByName(strings.TrimPrefix(container.Name, "/"))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return ep.Delete(force)
|
|
|
|
}
|
2016-12-26 16:36:12 +00:00
|
|
|
return fmt.Errorf("container %s is not connected to network %s", container.ID, n.Name())
|
2016-03-10 04:33:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if err := ep.Leave(sbox); err != nil {
|
|
|
|
return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err)
|
|
|
|
}
|
|
|
|
|
2016-05-25 20:47:38 +00:00
|
|
|
container.NetworkSettings.Ports = getPortMapInfo(sbox)
|
|
|
|
|
2016-03-10 04:33:21 +00:00
|
|
|
if err := ep.Delete(false); err != nil {
|
|
|
|
return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err)
|
|
|
|
}
|
|
|
|
|
|
|
|
delete(container.NetworkSettings.Networks, n.Name())
|
2016-08-23 23:50:15 +00:00
|
|
|
|
2017-02-28 10:11:59 +00:00
|
|
|
daemon.tryDetachContainerFromClusterNetwork(n, container)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-07-21 22:38:57 +00:00
|
|
|
func (daemon *Daemon) tryDetachContainerFromClusterNetwork(network *libnetwork.Network, container *container.Container) {
|
2023-07-25 15:37:19 +00:00
|
|
|
if !container.Managed && daemon.clusterProvider != nil && network.Dynamic() {
|
2017-02-28 10:11:59 +00:00
|
|
|
if err := daemon.clusterProvider.DetachNetwork(network.Name(), container.ID); err != nil {
|
2023-07-23 21:55:28 +00:00
|
|
|
log.G(context.TODO()).WithError(err).Warn("error detaching from network")
|
2017-02-28 10:11:59 +00:00
|
|
|
if err := daemon.clusterProvider.DetachNetwork(network.ID(), container.ID); err != nil {
|
2023-07-23 21:55:28 +00:00
|
|
|
log.G(context.TODO()).WithError(err).Warn("error detaching from network")
|
2016-11-03 22:44:45 +00:00
|
|
|
}
|
2016-08-23 23:50:15 +00:00
|
|
|
}
|
|
|
|
}
|
2023-08-26 13:24:46 +00:00
|
|
|
daemon.LogNetworkEventWithAttributes(network, events.ActionDisconnect, map[string]string{
|
2017-02-28 10:11:59 +00:00
|
|
|
"container": container.ID,
|
2023-07-23 21:55:28 +00:00
|
|
|
})
|
2016-03-10 04:33:21 +00:00
|
|
|
}
|
|
|
|
|
2023-09-06 15:50:56 +00:00
|
|
|
// normalizeNetMode checks whether the network mode references a network by a partial ID. In that case, it replaces the
|
|
|
|
// partial ID with the full network ID.
|
|
|
|
// TODO(aker): transform ID into name when the referenced network is one of the predefined.
|
|
|
|
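// For example, a network mode referencing a network by an ID prefix such as "ab12cd" is rewritten to that network's full ID.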
func (daemon *Daemon) normalizeNetMode(container *container.Container) error {
|
|
|
|
if container.HostConfig.NetworkMode.IsUserDefined() {
|
|
|
|
netMode := container.HostConfig.NetworkMode.NetworkName()
|
|
|
|
nw, err := daemon.FindNetwork(netMode)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("could not find a network matching network mode %s: %w", netMode, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if netMode != nw.ID() && netMode != nw.Name() {
|
|
|
|
container.HostConfig.NetworkMode = containertypes.NetworkMode(nw.ID())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-08-17 21:13:49 +00:00
|
|
|
func (daemon *Daemon) initializeNetworking(cfg *config.Config, container *container.Container) error {
|
2016-03-10 04:33:21 +00:00
|
|
|
if container.HostConfig.NetworkMode.IsContainer() {
|
|
|
|
// we need to get the hosts files from the container to join
|
|
|
|
nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-06-12 22:20:23 +00:00
|
|
|
|
|
|
|
err = daemon.initializeNetworkingPaths(container, nc)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-03-10 04:33:21 +00:00
|
|
|
container.Config.Hostname = nc.Config.Hostname
|
|
|
|
container.Config.Domainname = nc.Config.Domainname
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-07-23 21:28:57 +00:00
|
|
|
if container.HostConfig.NetworkMode.IsHost() && container.Config.Hostname == "" {
|
|
|
|
hn, err := os.Hostname()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2016-03-10 04:33:21 +00:00
|
|
|
}
|
2023-07-23 21:28:57 +00:00
|
|
|
container.Config.Hostname = hn
|
2016-03-10 04:33:21 +00:00
|
|
|
}
|
|
|
|
|
2022-08-17 21:13:49 +00:00
|
|
|
if err := daemon.allocateNetwork(cfg, container); err != nil {
|
2016-03-10 04:33:21 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return container.BuildHostnameFile()
|
|
|
|
}
|
|
|
|
|
|
|
|
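// getNetworkedContainer returns the container whose network namespace is
// shared through a "container:" network mode, after verifying that it is not
// the calling container itself, is running, and is not restarting.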
func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) {
|
|
|
|
nc, err := daemon.GetContainer(connectedContainerID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if containerID == nc.ID {
|
|
|
|
return nil, fmt.Errorf("cannot join own network")
|
|
|
|
}
|
|
|
|
if !nc.IsRunning() {
|
2023-07-23 21:24:07 +00:00
|
|
|
return nil, errdefs.Conflict(fmt.Errorf("cannot join network of a non running container: %s", connectedContainerID))
|
2016-03-10 04:33:21 +00:00
|
|
|
}
|
|
|
|
if nc.IsRestarting() {
|
|
|
|
return nil, errContainerIsRestarting(connectedContainerID)
|
|
|
|
}
|
|
|
|
return nc, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
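// releaseNetwork deletes the container's network sandbox, resets the
// operational data stored in its endpoint settings, and detaches the
// container from any cluster networks it was attached to.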
func (daemon *Daemon) releaseNetwork(container *container.Container) {
|
2016-07-20 23:11:28 +00:00
|
|
|
start := time.Now()
|
2021-06-15 10:46:43 +00:00
|
|
|
// If live-restore is enabled, the daemon cleans up dead containers when it starts up. In that case, the
|
|
|
|
// netController hasn't been initialized yet and so we can't proceed.
|
|
|
|
// TODO(aker): If we hit this case, the endpoint state won't be cleaned up (i.e. no call to cleanOperationalData).
|
2016-06-14 16:13:53 +00:00
|
|
|
if daemon.netController == nil {
|
|
|
|
return
|
|
|
|
}
|
2021-06-15 10:46:43 +00:00
|
|
|
// If the container uses the network namespace of another container, it doesn't own it -- nothing to do here.
|
|
|
|
if container.HostConfig.NetworkMode.IsContainer() {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if container.NetworkSettings == nil {
|
2016-03-10 04:33:21 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
container.NetworkSettings.Ports = nil
|
2023-07-23 21:58:31 +00:00
|
|
|
sid := container.NetworkSettings.SandboxID
|
2017-03-22 06:51:52 +00:00
|
|
|
if sid == "" {
|
2016-03-10 04:33:21 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-07-21 22:38:57 +00:00
|
|
|
var networks []*libnetwork.Network
|
2023-07-23 21:58:31 +00:00
|
|
|
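// Collect the networks the container is attached to and reset the operational data stored with each endpoint.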
for n, epSettings := range container.NetworkSettings.Networks {
|
2018-01-15 17:26:43 +00:00
|
|
|
if nw, err := daemon.FindNetwork(getNetworkID(n, epSettings.EndpointSettings)); err == nil {
|
2016-03-10 04:33:21 +00:00
|
|
|
networks = append(networks, nw)
|
|
|
|
}
|
2016-08-23 23:50:15 +00:00
|
|
|
|
|
|
|
if epSettings.EndpointSettings == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2016-03-10 04:33:21 +00:00
|
|
|
cleanOperationalData(epSettings)
|
|
|
|
}
|
|
|
|
|
|
|
|
sb, err := daemon.netController.SandboxByID(sid)
|
|
|
|
if err != nil {
|
2023-06-23 00:33:17 +00:00
|
|
|
log.G(context.TODO()).Warnf("error locating sandbox id %s: %v", sid, err)
|
2016-03-10 04:33:21 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := sb.Delete(); err != nil {
|
2023-06-23 00:33:17 +00:00
|
|
|
log.G(context.TODO()).Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err)
|
2016-03-10 04:33:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for _, nw := range networks {
|
2017-02-28 10:11:59 +00:00
|
|
|
daemon.tryDetachContainerFromClusterNetwork(nw, container)
|
2016-03-10 04:33:21 +00:00
|
|
|
}
|
2016-07-20 23:11:28 +00:00
|
|
|
networkActions.WithValues("release").UpdateSince(start)
|
2016-03-10 04:33:21 +00:00
|
|
|
}
|
2016-09-21 19:02:20 +00:00
|
|
|
|
|
|
|
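// errRemovalContainer returns the error reported when a network connect or
// disconnect is attempted on a container that is being removed.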
func errRemovalContainer(containerID string) error {
|
|
|
|
return fmt.Errorf("Container %s is marked for removal and cannot be connected or disconnected to the network", containerID)
|
|
|
|
}
|
|
|
|
|
|
|
|
// ConnectToNetwork connects a container to a network
|
|
|
|
func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error {
|
|
|
|
if endpointConfig == nil {
|
|
|
|
endpointConfig = &networktypes.EndpointSettings{}
|
|
|
|
}
|
2017-05-31 18:52:43 +00:00
|
|
|
container.Lock()
|
|
|
|
defer container.Unlock()
|
|
|
|
|
2016-09-21 19:02:20 +00:00
|
|
|
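// For a container that is not running, only record the desired configuration; the connection is made when the container starts.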
if !container.Running {
|
|
|
|
if container.RemovalInProgress || container.Dead {
|
|
|
|
return errRemovalContainer(container.ID)
|
|
|
|
}
|
|
|
|
|
2018-01-15 17:26:43 +00:00
|
|
|
n, err := daemon.FindNetwork(idOrName)
|
2016-09-21 19:02:20 +00:00
|
|
|
if err == nil && n != nil {
|
|
|
|
if err := daemon.updateNetworkConfig(container, n, endpointConfig, true); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
container.NetworkSettings.Networks[idOrName] = &network.EndpointSettings{
|
|
|
|
EndpointSettings: endpointConfig,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2024-01-26 18:00:32 +00:00
|
|
|
epc := &network.EndpointSettings{
|
|
|
|
EndpointSettings: endpointConfig,
|
|
|
|
}
|
|
|
|
if err := daemon.connectToNetwork(&daemon.config().Config, container, idOrName, epc, true); err != nil {
|
2016-09-21 19:02:20 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2017-02-23 23:12:18 +00:00
|
|
|
|
2017-06-22 14:46:26 +00:00
|
|
|
return container.CheckpointTo(daemon.containersReplica)
|
2016-09-21 19:02:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// DisconnectFromNetwork disconnects container from network n.
|
|
|
|
func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, networkName string, force bool) error {
|
2018-01-15 17:26:43 +00:00
|
|
|
n, err := daemon.FindNetwork(networkName)
|
2017-05-31 18:52:43 +00:00
|
|
|
container.Lock()
|
|
|
|
defer container.Unlock()
|
|
|
|
|
2016-09-21 19:02:20 +00:00
|
|
|
if !container.Running || (err != nil && force) {
|
|
|
|
if container.RemovalInProgress || container.Dead {
|
|
|
|
return errRemovalContainer(container.ID)
|
|
|
|
}
|
|
|
|
// In case networkName is resolved we will use n.Name()
|
|
|
|
// this will cover the case where network id is passed.
|
|
|
|
if n != nil {
|
|
|
|
networkName = n.Name()
|
|
|
|
}
|
|
|
|
if _, ok := container.NetworkSettings.Networks[networkName]; !ok {
|
|
|
|
return fmt.Errorf("container %s is not connected to the network %s", container.ID, networkName)
|
|
|
|
}
|
|
|
|
delete(container.NetworkSettings.Networks, networkName)
|
|
|
|
} else if err == nil {
|
|
|
|
if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() {
|
|
|
|
return runconfig.ErrConflictHostNetwork
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := daemon.disconnectFromNetwork(container, n, false); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-06-22 14:46:26 +00:00
|
|
|
if err := container.CheckpointTo(daemon.containersReplica); err != nil {
|
2017-02-23 23:12:18 +00:00
|
|
|
return err
|
2016-09-21 19:02:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if n != nil {
|
2023-08-26 13:24:46 +00:00
|
|
|
daemon.LogNetworkEventWithAttributes(n, events.ActionDisconnect, map[string]string{
|
2016-09-21 19:02:20 +00:00
|
|
|
"container": container.ID,
|
2017-02-22 22:02:20 +00:00
|
|
|
})
|
2016-09-21 19:02:20 +00:00
|
|
|
}
|
2017-02-22 22:02:20 +00:00
|
|
|
|
2016-09-21 19:02:20 +00:00
|
|
|
return nil
|
|
|
|
}
|
2016-09-18 06:30:39 +00:00
|
|
|
|
|
|
|
// ActivateContainerServiceBinding puts this container into load balancer active rotation and DNS response
|
|
|
|
func (daemon *Daemon) ActivateContainerServiceBinding(containerName string) error {
|
2019-08-09 12:10:07 +00:00
|
|
|
ctr, err := daemon.GetContainer(containerName)
|
2016-09-18 06:30:39 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2023-08-12 12:38:43 +00:00
|
|
|
sb, err := daemon.netController.GetSandbox(ctr.ID)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to activate service binding for container %s: %w", containerName, err)
|
2016-09-18 06:30:39 +00:00
|
|
|
}
|
|
|
|
return sb.EnableService()
|
|
|
|
}
|
|
|
|
|
2016-12-28 16:29:15 +00:00
|
|
|
// DeactivateContainerServiceBinding removes this container from load balancer active rotation, and DNS response
|
2016-09-18 06:30:39 +00:00
|
|
|
func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) error {
|
2019-08-09 12:10:07 +00:00
|
|
|
ctr, err := daemon.GetContainer(containerName)
|
2016-09-18 06:30:39 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2023-08-12 12:38:43 +00:00
|
|
|
sb, err := daemon.netController.GetSandbox(ctr.ID)
|
|
|
|
if err != nil {
|
2017-04-10 16:06:09 +00:00
|
|
|
// If the network sandbox is not found, then there is nothing to deactivate
|
2023-08-12 12:38:43 +00:00
|
|
|
log.G(context.TODO()).WithError(err).Debugf("Could not find network sandbox for container %s on service binding deactivation request", containerName)
|
2017-04-10 16:06:09 +00:00
|
|
|
return nil
|
2016-09-18 06:30:39 +00:00
|
|
|
}
|
|
|
|
return sb.DisableService()
|
|
|
|
}
|
2017-10-31 19:46:53 +00:00
|
|
|
|
|
|
|
func getNetworkID(name string, endpointSettings *networktypes.EndpointSettings) string {
|
|
|
|
// We only want to prefer NetworkID for user defined networks.
|
|
|
|
// For systems like bridge, none, etc. the name is preferred (otherwise restart may cause issues)
|
|
|
|
if containertypes.NetworkMode(name).IsUserDefined() && endpointSettings != nil && endpointSettings.NetworkID != "" {
|
|
|
|
return endpointSettings.NetworkID
|
|
|
|
}
|
|
|
|
return name
|
|
|
|
}
|
2018-05-10 20:44:09 +00:00
|
|
|
|
2023-08-12 11:48:41 +00:00
|
|
|
// setNetworkSandbox updates the sandbox ID and Key.
|
|
|
|
func setNetworkSandbox(c *container.Container, sb *libnetwork.Sandbox) {
|
2018-05-10 20:44:09 +00:00
|
|
|
c.NetworkSettings.SandboxID = sb.ID()
|
|
|
|
c.NetworkSettings.SandboxKey = sb.Key()
|
|
|
|
}
|