2015-07-02 05:00:48 +00:00
|
|
|
package libnetwork
|
|
|
|
|
|
|
|
import (
|
2023-06-23 00:33:17 +00:00
|
|
|
"context"
|
2015-07-02 05:00:48 +00:00
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
2015-12-24 09:51:32 +00:00
|
|
|
"net"
|
2018-02-27 16:15:31 +00:00
|
|
|
"sort"
|
2015-12-24 09:51:32 +00:00
|
|
|
"strings"
|
2015-07-02 05:00:48 +00:00
|
|
|
"sync"
|
|
|
|
|
2023-06-23 00:33:17 +00:00
|
|
|
"github.com/containerd/containerd/log"
|
2021-04-06 00:24:47 +00:00
|
|
|
"github.com/docker/docker/libnetwork/etchosts"
|
|
|
|
"github.com/docker/docker/libnetwork/osl"
|
|
|
|
"github.com/docker/docker/libnetwork/types"
|
2015-07-02 05:00:48 +00:00
|
|
|
)
|
|
|
|
|
2016-01-11 10:15:20 +00:00
|
|
|
// SandboxOption is an option setter function type used to pass various options to
// NewNetContainer method. The various setter functions of type SandboxOption are
// provided by libnetwork, they look like ContainerOptionXXXX(...)
//
// Each option mutates the *Sandbox it is handed; options are applied in order
// by (*Sandbox).processOptions.
type SandboxOption func(sb *Sandbox)
|
2015-07-02 05:00:48 +00:00
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
func (sb *Sandbox) processOptions(options ...SandboxOption) {
|
2015-07-02 05:00:48 +00:00
|
|
|
for _, opt := range options {
|
|
|
|
if opt != nil {
|
|
|
|
opt(sb)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// Sandbox provides the control over the network container entity.
// It is a one to one mapping with the container.
type Sandbox struct {
	id          string          // sandbox ID; used to derive the osl key unless config.useDefaultSandBox is set
	containerID string          // ID of the container this sandbox is attached to
	config      containerConfig // per-container options accumulated via processOptions
	extDNS      []extDNSEntry   // external DNS entries — presumably upstream resolvers; verify against resolver code
	osSbox      *osl.Namespace  // OS-level network namespace; may be nil (checked before use)
	controller  *Controller     // owning controller; this sandbox is tracked in controller.sandboxes
	resolver    *Resolver       // embedded DNS resolver; stopped during delete if non-nil
	// resolverOnce presumably guards one-time resolver setup — TODO confirm against setup code
	resolverOnce sync.Once
	endpoints    []*Endpoint    // attached endpoints, kept sorted per Endpoint.Less by addEndpoint
	epPriority   map[string]int // endpoint ID -> priority; consulted by Endpoint.Less (missing key defaults to 0)
	// populatedEndpoints holds IDs of endpoints whose resources were populated
	// into the os sandbox — presumably; entries are removed in clearNetworkResources
	populatedEndpoints map[string]struct{}
	joinLeaveDone      chan struct{} // non-nil while a join/leave is in progress; see joinLeaveStart/joinLeaveEnd
	dbIndex            uint64        // datastore index — presumably; verify against store code
	dbExists           bool          // whether the object exists in the datastore — presumably
	isStub             bool          // NOTE(review): appears to mark placeholder sandboxes — confirm
	inDelete           bool          // set while delete() runs, serializing concurrent deletes
	ingress            bool          // true for the ingress sandbox; controller.ingressSandbox is cleared on delete
	ndotsSet           bool          // whether a custom ndots option was set; exposed via NdotsSet()
	oslTypes           []osl.SandboxType // slice of properties of this sandbox
	loadBalancerNID    string            // NID that this SB is a load balancer for
	mu                 sync.Mutex        // protects the mutable state above
	// This mutex is used to serialize service related operation for an endpoint
	// The lock is here because the endpoint is saved into the store so is not unique
	service sync.Mutex
}
|
|
|
|
|
|
|
|
// These are the container configs used to customize container /etc/hosts file.
type hostsPathConfig struct {
	hostName        string // hostname to write into /etc/hosts — presumably; verify against writer
	domainName      string // domain name associated with the hostname — presumably
	hostsPath       string // path of the generated hosts file; dropped from the etchosts cache on sandbox delete
	originHostsPath string // path of an origin hosts file to seed from — TODO confirm
	extraHosts      []extraHost    // additional name/IP entries
	parentUpdates   []parentUpdate // hosts updates to propagate to linked (parent) containers — presumably
}
|
|
|
|
|
|
|
|
// parentUpdate describes a single hosts-file update destined for another
// container — presumably a linked "parent" container; verify against callers.
type parentUpdate struct {
	cid  string // target container ID
	name string // entry name
	ip   string // entry IP address
}
|
|
|
|
|
|
|
|
// extraHost is one additional name/IP mapping for the container's /etc/hosts.
type extraHost struct {
	name string // host name of the entry
	IP   string // IP address of the entry
}
|
|
|
|
|
|
|
|
// These are the container configs used to customize container /etc/resolv.conf file.
type resolvConfPathConfig struct {
	resolvConfPath       string // path of the generated resolv.conf — presumably; verify against writer
	originResolvConfPath string // path of an origin resolv.conf to seed from — TODO confirm
	resolvConfHashFile   string // path of a hash file, presumably used to detect user edits — confirm
	dnsList              []string // DNS server addresses
	dnsSearchList        []string // DNS search domains
	dnsOptionsList       []string // DNS options (e.g. ndots) — TODO confirm
}
|
|
|
|
|
|
|
|
// containerConfig aggregates the per-container configuration applied through
// SandboxOption setters (see processOptions).
type containerConfig struct {
	hostsPathConfig
	resolvConfPathConfig
	generic map[string]interface{} // free-form options; a copy is exposed via Labels()
	// useDefaultSandBox makes Key() resolve to the shared "default" osl key and
	// prevents delete() from destroying the os sandbox.
	useDefaultSandBox bool
	useExternalKey    bool // NOTE(review): appears to select externally-managed sandbox keys — confirm
	exposedPorts      []types.TransportPort // transport ports exposed by the container
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// ID returns the ID of the sandbox.
//
// Read without holding sb.mu — assumes sb.id is immutable after creation
// (it is only written by UnmarshalJSON); TODO confirm.
func (sb *Sandbox) ID() string {
	return sb.id
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// ContainerID returns the container id associated to this sandbox.
//
// Read without holding sb.mu — assumes containerID is set once at creation;
// TODO confirm.
func (sb *Sandbox) ContainerID() string {
	return sb.containerID
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// Key returns the sandbox's key.
|
|
|
|
func (sb *Sandbox) Key() string {
|
2015-07-02 05:00:48 +00:00
|
|
|
if sb.config.useDefaultSandBox {
|
|
|
|
return osl.GenerateKey("default")
|
|
|
|
}
|
|
|
|
return osl.GenerateKey(sb.id)
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// Labels returns the sandbox's labels.
|
|
|
|
func (sb *Sandbox) Labels() map[string]interface{} {
|
2023-01-12 00:49:40 +00:00
|
|
|
sb.mu.Lock()
|
|
|
|
defer sb.mu.Unlock()
|
2015-12-07 22:45:51 +00:00
|
|
|
opts := make(map[string]interface{}, len(sb.config.generic))
|
|
|
|
for k, v := range sb.config.generic {
|
|
|
|
opts[k] = v
|
|
|
|
}
|
|
|
|
return opts
|
2015-07-02 05:00:48 +00:00
|
|
|
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// Delete destroys this container after detaching it from all connected endpoints.
//
// force is false: gateway-network endpoints are skipped (their detach/removal
// is automatic) and each endpoint is left before being deleted; see delete().
func (sb *Sandbox) Delete() error {
	return sb.delete(false)
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// delete detaches the sandbox from all its endpoints, deletes them, tears
// down the os sandbox and resolver, removes the sandbox from the store and
// from the controller's tables. With force set, endpoint Leave is skipped and
// gateway-network endpoints are deleted like any other.
func (sb *Sandbox) delete(force bool) error {
	sb.mu.Lock()
	// Only one delete may run at a time.
	if sb.inDelete {
		sb.mu.Unlock()
		return types.ForbiddenErrorf("another sandbox delete in progress")
	}
	// Set the inDelete flag. This will ensure that we don't
	// update the store until we have completed all the endpoint
	// leaves and deletes. And when endpoint leaves and deletes
	// are completed then we can finally delete the sandbox object
	// altogether from the data store. If the daemon exits
	// ungracefully in the middle of a sandbox delete this way we
	// will have all the references to the endpoints in the
	// sandbox so that we can clean them up when we restart
	sb.inDelete = true
	sb.mu.Unlock()

	c := sb.controller

	// Detach from all endpoints
	retain := false
	for _, ep := range sb.Endpoints() {
		// gw network endpoint detach and removal are automatic
		if ep.endpointInGWNetwork() && !force {
			continue
		}
		// Retain the sandbox if we can't obtain the network from store.
		if _, err := c.getNetworkFromStore(ep.getNetwork().ID()); err != nil {
			if c.isDistributedControl() {
				retain = true
			}
			log.G(context.TODO()).Warnf("Failed getting network for ep %s during sandbox %s delete: %v", ep.ID(), sb.ID(), err)
			continue
		}

		if !force {
			if err := ep.Leave(sb); err != nil {
				log.G(context.TODO()).Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err)
			}
		}

		if err := ep.Delete(force); err != nil {
			log.G(context.TODO()).Warnf("Failed deleting endpoint %s: %v\n", ep.ID(), err)
		}
	}

	// Some endpoint could not be cleaned up: clear the inDelete flag so a
	// later delete can retry, and report failure.
	if retain {
		sb.mu.Lock()
		sb.inDelete = false
		sb.mu.Unlock()
		return fmt.Errorf("could not cleanup all the endpoints in container %s / sandbox %s", sb.containerID, sb.id)
	}
	// Container is going away. Path cache in etchosts is most
	// likely not required any more. Drop it.
	etchosts.Drop(sb.config.hostsPath)

	if sb.resolver != nil {
		sb.resolver.Stop()
	}

	// The shared default sandbox is never destroyed here.
	if sb.osSbox != nil && !sb.config.useDefaultSandBox {
		if err := sb.osSbox.Destroy(); err != nil {
			log.G(context.TODO()).WithError(err).Warn("error destroying network sandbox")
		}
	}

	if err := sb.storeDelete(); err != nil {
		log.G(context.TODO()).Warnf("Failed to delete sandbox %s from store: %v", sb.ID(), err)
	}

	// Unregister from the controller under its lock.
	c.mu.Lock()
	if sb.ingress {
		c.ingressSandbox = nil
	}
	delete(c.sandboxes, sb.ID())
	c.mu.Unlock()

	return nil
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// Rename changes the name of all attached Endpoints.
//
// Gateway-network endpoints are skipped. On the first rename failure the loop
// stops; the deferred closures accumulated for each already-renamed endpoint
// then roll those endpoints back to their old names (the defers run at
// function return, after err has been set).
func (sb *Sandbox) Rename(name string) error {
	var err error

	for _, ep := range sb.Endpoints() {
		if ep.endpointInGWNetwork() {
			continue
		}

		oldName := ep.Name()
		lEp := ep // capture this iteration's endpoint for the rollback closure
		if err = ep.rename(name); err != nil {
			break
		}

		// Intentional defer-in-loop: one rollback per successfully renamed
		// endpoint, executed only if a later rename fails (err != nil).
		defer func() {
			if err != nil {
				if err2 := lEp.rename(oldName); err2 != nil {
					log.G(context.TODO()).WithField("old", oldName).WithField("origError", err).WithError(err2).Error("error renaming sandbox")
				}
			}
		}()
	}

	return err
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// Refresh leaves all the endpoints, resets and re-applies the options,
|
|
|
|
// re-joins all the endpoints without destroying the osl sandbox
|
|
|
|
func (sb *Sandbox) Refresh(options ...SandboxOption) error {
|
2015-09-02 01:55:53 +00:00
|
|
|
// Store connected endpoints
|
2023-01-12 01:42:24 +00:00
|
|
|
epList := sb.Endpoints()
|
2015-09-02 01:55:53 +00:00
|
|
|
|
|
|
|
// Detach from all endpoints
|
|
|
|
for _, ep := range epList {
|
|
|
|
if err := ep.Leave(sb); err != nil {
|
2023-06-23 00:33:17 +00:00
|
|
|
log.G(context.TODO()).Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err)
|
2015-09-02 01:55:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Re-apply options
|
|
|
|
sb.config = containerConfig{}
|
|
|
|
sb.processOptions(options...)
|
|
|
|
|
|
|
|
// Setup discovery files
|
|
|
|
if err := sb.setupResolutionFiles(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-05-23 02:55:17 +00:00
|
|
|
// Re-connect to all endpoints
|
2015-09-02 01:55:53 +00:00
|
|
|
for _, ep := range epList {
|
|
|
|
if err := ep.Join(sb); err != nil {
|
2023-06-23 00:33:17 +00:00
|
|
|
log.G(context.TODO()).Warnf("Failed attach sandbox %s to endpoint %s: %v\n", sb.ID(), ep.ID(), err)
|
2015-09-02 01:55:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
func (sb *Sandbox) MarshalJSON() ([]byte, error) {
|
2023-01-12 00:49:40 +00:00
|
|
|
sb.mu.Lock()
|
|
|
|
defer sb.mu.Unlock()
|
2015-07-02 05:00:48 +00:00
|
|
|
|
|
|
|
// We are just interested in the container ID. This can be expanded to include all of containerInfo if there is a need
|
|
|
|
return json.Marshal(sb.id)
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
func (sb *Sandbox) UnmarshalJSON(b []byte) (err error) {
|
2023-01-12 00:49:40 +00:00
|
|
|
sb.mu.Lock()
|
|
|
|
defer sb.mu.Unlock()
|
2015-07-02 05:00:48 +00:00
|
|
|
|
|
|
|
var id string
|
|
|
|
if err := json.Unmarshal(b, &id); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
sb.id = id
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// Endpoints returns all the endpoints connected to the sandbox.
|
2023-01-12 01:42:24 +00:00
|
|
|
func (sb *Sandbox) Endpoints() []*Endpoint {
|
2023-01-12 00:49:40 +00:00
|
|
|
sb.mu.Lock()
|
|
|
|
defer sb.mu.Unlock()
|
2016-01-26 00:23:00 +00:00
|
|
|
|
2023-01-12 01:42:24 +00:00
|
|
|
eps := make([]*Endpoint, len(sb.endpoints))
|
2018-03-08 10:45:04 +00:00
|
|
|
copy(eps, sb.endpoints)
|
2015-09-02 01:55:53 +00:00
|
|
|
|
|
|
|
return eps
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:42:24 +00:00
|
|
|
func (sb *Sandbox) addEndpoint(ep *Endpoint) {
|
2023-01-12 00:49:40 +00:00
|
|
|
sb.mu.Lock()
|
|
|
|
defer sb.mu.Unlock()
|
2018-02-27 16:15:31 +00:00
|
|
|
|
|
|
|
l := len(sb.endpoints)
|
|
|
|
i := sort.Search(l, func(j int) bool {
|
|
|
|
return ep.Less(sb.endpoints[j])
|
|
|
|
})
|
|
|
|
|
|
|
|
sb.endpoints = append(sb.endpoints, nil)
|
|
|
|
copy(sb.endpoints[i+1:], sb.endpoints[i:])
|
|
|
|
sb.endpoints[i] = ep
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:42:24 +00:00
|
|
|
func (sb *Sandbox) removeEndpoint(ep *Endpoint) {
|
2023-01-12 00:49:40 +00:00
|
|
|
sb.mu.Lock()
|
|
|
|
defer sb.mu.Unlock()
|
2015-12-07 22:45:51 +00:00
|
|
|
|
2018-02-27 16:15:31 +00:00
|
|
|
sb.removeEndpointRaw(ep)
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:42:24 +00:00
|
|
|
func (sb *Sandbox) removeEndpointRaw(ep *Endpoint) {
|
2015-12-07 22:45:51 +00:00
|
|
|
for i, e := range sb.endpoints {
|
|
|
|
if e == ep {
|
2018-02-27 16:15:31 +00:00
|
|
|
sb.endpoints = append(sb.endpoints[:i], sb.endpoints[i+1:]...)
|
2015-12-07 22:45:51 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:42:24 +00:00
|
|
|
func (sb *Sandbox) getEndpoint(id string) *Endpoint {
|
2023-01-12 00:49:40 +00:00
|
|
|
sb.mu.Lock()
|
|
|
|
defer sb.mu.Unlock()
|
2015-10-05 11:21:15 +00:00
|
|
|
|
|
|
|
for _, ep := range sb.endpoints {
|
|
|
|
if ep.id == id {
|
|
|
|
return ep
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
func (sb *Sandbox) HandleQueryResp(name string, ip net.IP) {
|
2023-01-12 01:42:24 +00:00
|
|
|
for _, ep := range sb.Endpoints() {
|
2016-12-06 22:56:24 +00:00
|
|
|
n := ep.getNetwork()
|
|
|
|
n.HandleQueryResp(name, ip)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
func (sb *Sandbox) ResolveIP(ip string) string {
|
2015-12-24 09:51:32 +00:00
|
|
|
var svc string
|
2023-06-23 00:33:17 +00:00
|
|
|
log.G(context.TODO()).Debugf("IP To resolve %v", ip)
|
2015-12-24 09:51:32 +00:00
|
|
|
|
2023-01-12 01:42:24 +00:00
|
|
|
for _, ep := range sb.Endpoints() {
|
2015-12-24 09:51:32 +00:00
|
|
|
n := ep.getNetwork()
|
2016-09-19 22:48:06 +00:00
|
|
|
svc = n.ResolveIP(ip)
|
|
|
|
if len(svc) != 0 {
|
|
|
|
return svc
|
2015-12-24 09:51:32 +00:00
|
|
|
}
|
|
|
|
}
|
2016-09-19 22:48:06 +00:00
|
|
|
|
2015-12-24 09:51:32 +00:00
|
|
|
return svc
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// ResolveService returns all the backend details about the containers or hosts
|
|
|
|
// backing a service. Its purpose is to satisfy an SRV query.
|
|
|
|
func (sb *Sandbox) ResolveService(name string) ([]*net.SRV, []net.IP) {
|
2023-06-23 00:33:17 +00:00
|
|
|
log.G(context.TODO()).Debugf("Service name To resolve: %v", name)
|
2016-05-08 07:48:04 +00:00
|
|
|
|
2017-05-22 02:25:52 +00:00
|
|
|
// There are DNS implementations that allow SRV queries for names not in
|
2016-08-12 22:40:39 +00:00
|
|
|
// the format defined by RFC 2782. Hence specific validations checks are
|
|
|
|
// not done
|
2023-08-16 18:17:35 +00:00
|
|
|
if parts := strings.SplitN(name, ".", 3); len(parts) < 3 {
|
2016-09-19 22:48:06 +00:00
|
|
|
return nil, nil
|
2016-05-08 07:48:04 +00:00
|
|
|
}
|
|
|
|
|
2023-01-12 01:42:24 +00:00
|
|
|
for _, ep := range sb.Endpoints() {
|
2016-05-08 07:48:04 +00:00
|
|
|
n := ep.getNetwork()
|
|
|
|
|
2023-08-16 18:17:35 +00:00
|
|
|
srv, ip := n.ResolveService(name)
|
2016-05-08 07:48:04 +00:00
|
|
|
if len(srv) > 0 {
|
2023-08-16 18:17:35 +00:00
|
|
|
return srv, ip
|
2016-05-08 07:48:04 +00:00
|
|
|
}
|
|
|
|
}
|
2023-08-16 18:17:35 +00:00
|
|
|
return nil, nil
|
2016-05-08 07:48:04 +00:00
|
|
|
}
|
|
|
|
|
2023-01-12 01:42:24 +00:00
|
|
|
func getDynamicNwEndpoints(epList []*Endpoint) []*Endpoint {
|
|
|
|
eps := []*Endpoint{}
|
2016-08-11 00:44:33 +00:00
|
|
|
for _, ep := range epList {
|
|
|
|
n := ep.getNetwork()
|
|
|
|
if n.dynamic && !n.ingress {
|
|
|
|
eps = append(eps, ep)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return eps
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:42:24 +00:00
|
|
|
func getIngressNwEndpoint(epList []*Endpoint) *Endpoint {
|
2016-08-11 00:44:33 +00:00
|
|
|
for _, ep := range epList {
|
|
|
|
n := ep.getNetwork()
|
|
|
|
if n.ingress {
|
|
|
|
return ep
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:42:24 +00:00
|
|
|
func getLocalNwEndpoints(epList []*Endpoint) []*Endpoint {
|
|
|
|
eps := []*Endpoint{}
|
2016-08-11 00:44:33 +00:00
|
|
|
for _, ep := range epList {
|
|
|
|
n := ep.getNetwork()
|
|
|
|
if !n.dynamic && !n.ingress {
|
|
|
|
eps = append(eps, ep)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return eps
|
|
|
|
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// ResolveName resolves a container/service name to IP addresses across the
// networks this sandbox is attached to. The bool result reports an
// "IPv6 miss": the name exists but has no record of the requested type.
func (sb *Sandbox) ResolveName(name string, ipType int) ([]net.IP, bool) {
	// Embedded server owns the docker network domain. Resolution should work
	// for both container_name and container_name.network_name
	// We allow '.' in service name and network name. For a name a.b.c.d the
	// following have to tried;
	// {a.b.c.d in the networks container is connected to}
	// {a.b.c in network d},
	// {a.b in network c.d},
	// {a in network b.c.d},

	log.G(context.TODO()).Debugf("Name To resolve: %v", name)
	name = strings.TrimSuffix(name, ".")
	reqName := []string{name}
	networkName := []string{""}

	// Build the parallel (name, network) candidate lists by peeling one
	// dot-separated label off the right of the name per iteration.
	if strings.Contains(name, ".") {
		var i int
		dup := name
		for {
			if i = strings.LastIndex(dup, "."); i == -1 {
				break
			}
			networkName = append(networkName, name[i+1:])
			reqName = append(reqName, name[:i])

			dup = dup[:i]
		}
	}

	epList := sb.Endpoints()

	// In swarm mode services with exposed ports are connected to user overlay
	// network, ingress network and docker_gwbridge network. Name resolution
	// should prioritize returning the VIP/IPs on user overlay network.
	newList := []*Endpoint{}
	if !sb.controller.isDistributedControl() {
		newList = append(newList, getDynamicNwEndpoints(epList)...)
		ingressEP := getIngressNwEndpoint(epList)
		if ingressEP != nil {
			newList = append(newList, ingressEP)
		}
		newList = append(newList, getLocalNwEndpoints(epList)...)
		epList = newList
	}

	for i := 0; i < len(reqName); i++ {
		// First check for local container alias
		ip, ipv6Miss := sb.resolveName(reqName[i], networkName[i], epList, true, ipType)
		if ip != nil {
			return ip, false
		}
		if ipv6Miss {
			// Name known but no record of this type: stop searching.
			return ip, ipv6Miss
		}

		// Resolve the actual container name
		ip, ipv6Miss = sb.resolveName(reqName[i], networkName[i], epList, false, ipType)
		if ip != nil {
			return ip, false
		}
		if ipv6Miss {
			return ip, ipv6Miss
		}
	}
	return nil, false
}
|
|
|
|
|
2023-08-17 10:43:34 +00:00
|
|
|
// resolveName looks up nameOrAlias on the networks of the given endpoints.
// When lookupAlias is true, only endpoint-local aliases are consulted (the
// alias target is resolved instead); when false, alias names are explicitly
// skipped so a plain lookup never shadows an alias. A non-empty networkName
// restricts the search to endpoints on that network. ipv6Miss reports that
// some network knew the name but had no record of the requested ipType.
func (sb *Sandbox) resolveName(nameOrAlias string, networkName string, epList []*Endpoint, lookupAlias bool, ipType int) (_ []net.IP, ipv6Miss bool) {
	for _, ep := range epList {
		// Endpoints without aliases can't satisfy an alias lookup.
		if lookupAlias && len(ep.aliases) == 0 {
			continue
		}

		nw := ep.getNetwork()
		if networkName != "" && networkName != nw.Name() {
			continue
		}

		name := nameOrAlias
		if lookupAlias {
			// ep.aliases is shared state: read under the endpoint lock.
			ep.mu.Lock()
			alias, ok := ep.aliases[nameOrAlias]
			ep.mu.Unlock()
			if !ok {
				continue
			}
			// Resolve the alias target rather than the alias itself.
			name = alias
		} else {
			// If it is a regular lookup and if the requested name is an alias
			// don't perform a svc lookup for this endpoint.
			ep.mu.Lock()
			_, ok := ep.aliases[nameOrAlias]
			ep.mu.Unlock()
			if ok {
				continue
			}
		}

		ip, miss := nw.ResolveName(name, ipType)
		if ip != nil {
			return ip, false
		}
		if miss {
			// Remember the miss but keep trying the remaining endpoints.
			ipv6Miss = miss
		}
	}
	return nil, ipv6Miss
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// EnableService makes a managed container's service available by adding the
// endpoint to the service load balancer and service discovery.
//
// On failure, the deferred DisableService rolls back any endpoints that were
// already enabled (err is the named result, visible inside the defer).
func (sb *Sandbox) EnableService() (err error) {
	log.G(context.TODO()).Debugf("EnableService %s START", sb.containerID)
	defer func() {
		if err != nil {
			if err2 := sb.DisableService(); err2 != nil {
				log.G(context.TODO()).WithError(err2).WithField("origError", err).Error("Error while disabling service after original error")
			}
		}
	}()
	for _, ep := range sb.Endpoints() {
		// Skip endpoints already serving; enabling is idempotent per endpoint.
		if !ep.isServiceEnabled() {
			if err := ep.addServiceInfoToCluster(sb); err != nil {
				return fmt.Errorf("could not update state for endpoint %s into cluster: %v", ep.Name(), err)
			}
			ep.enableService()
		}
	}
	log.G(context.TODO()).Debugf("EnableService %s DONE", sb.containerID)
	return nil
}
|
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// DisableService removes a managed container's endpoints from the load balancer
// and service discovery.
//
// Failures are collected per endpoint and folded into a single error by the
// deferred closure (err is the named result). The 'false' flag passed to
// deleteServiceInfoFromCluster requests a graceful disable (weight 0) rather
// than full removal; full removal happens later on endpoint leave.
func (sb *Sandbox) DisableService() (err error) {
	log.G(context.TODO()).Debugf("DisableService %s START", sb.containerID)
	failedEps := []string{}
	defer func() {
		if len(failedEps) > 0 {
			err = fmt.Errorf("failed to disable service on sandbox:%s, for endpoints %s", sb.ID(), strings.Join(failedEps, ","))
		}
	}()
	for _, ep := range sb.Endpoints() {
		if ep.isServiceEnabled() {
			if err := ep.deleteServiceInfoFromCluster(sb, false, "DisableService"); err != nil {
				failedEps = append(failedEps, ep.Name())
				log.G(context.TODO()).Warnf("failed update state for endpoint %s into cluster: %v", ep.Name(), err)
			}
			// Mark disabled locally even if the cluster update failed.
			ep.disableService()
		}
	}
	log.G(context.TODO()).Debugf("DisableService %s DONE", sb.containerID)
	return nil
}
|
|
|
|
|
2023-01-12 01:42:24 +00:00
|
|
|
// clearNetworkResources releases the os-sandbox resources held for origEp,
// removes it from the sandbox's endpoint list, and switches the default
// gateway to the next eligible endpoint if the removed one provided it.
// Lock handling is deliberately manual (no defer) because releasing the os
// resources and updating the gateway must happen outside sb.mu.
func (sb *Sandbox) clearNetworkResources(origEp *Endpoint) error {
	ep := sb.getEndpoint(origEp.id)
	if ep == nil {
		return fmt.Errorf("could not find the sandbox endpoint data for endpoint %s",
			origEp.id)
	}

	// Snapshot the fields we need so the release runs without holding sb.mu.
	sb.mu.Lock()
	osSbox := sb.osSbox
	inDelete := sb.inDelete
	sb.mu.Unlock()
	if osSbox != nil {
		releaseOSSboxResources(osSbox, ep)
	}

	sb.mu.Lock()
	delete(sb.populatedEndpoints, ep.ID())

	if len(sb.endpoints) == 0 {
		// sb.endpoints should never be empty and this is unexpected error condition
		// We log an error message to note this down for debugging purposes.
		log.G(context.TODO()).Errorf("No endpoints in sandbox while trying to remove endpoint %s", ep.Name())
		sb.mu.Unlock()
		return nil
	}

	var (
		gwepBefore, gwepAfter *Endpoint
		index                 = -1
	)
	// Find ep's position and the current gateway endpoint in one pass;
	// endpoints are sorted, so the first with a gateway is the active one.
	for i, e := range sb.endpoints {
		if e == ep {
			index = i
		}
		if len(e.Gateway()) > 0 && gwepBefore == nil {
			gwepBefore = e
		}
		if index != -1 && gwepBefore != nil {
			break
		}
	}

	if index == -1 {
		log.G(context.TODO()).Warnf("Endpoint %s has already been deleted", ep.Name())
		sb.mu.Unlock()
		return nil
	}

	sb.removeEndpointRaw(ep)
	// Recompute the gateway endpoint after removal.
	for _, e := range sb.endpoints {
		if len(e.Gateway()) > 0 {
			gwepAfter = e
			break
		}
	}
	delete(sb.epPriority, ep.ID())
	sb.mu.Unlock()

	// The gateway endpoint changed: repoint the sandbox's default gateway.
	if gwepAfter != nil && gwepBefore != gwepAfter {
		if err := sb.updateGateway(gwepAfter); err != nil {
			return err
		}
	}

	// Only update the store if we did not come here as part of
	// sandbox delete. If we came here as part of delete then do
	// not bother updating the store. The sandbox object will be
	// deleted anyway
	if !inDelete {
		return sb.storeUpdate()
	}

	return nil
}
|
|
|
|
|
2015-09-19 00:33:55 +00:00
|
|
|
// joinLeaveStart waits to ensure there are no joins or leaves in progress and
// marks this join/leave in progress without race
func (sb *Sandbox) joinLeaveStart() {
	sb.mu.Lock()
	defer sb.mu.Unlock()

	// Another join/leave is in flight while joinLeaveDone is non-nil.
	for sb.joinLeaveDone != nil {
		// Capture the channel, then drop sb.mu so the in-flight operation can
		// finish (joinLeaveEnd needs the lock to close it).
		joinLeaveDone := sb.joinLeaveDone
		sb.mu.Unlock()

		<-joinLeaveDone

		// Reacquire and re-check: another waiter may have started a new
		// operation before we got the lock back.
		sb.mu.Lock()
	}

	// Claim the slot; joinLeaveEnd closes and clears this channel.
	sb.joinLeaveDone = make(chan struct{})
}
|
|
|
|
|
|
|
|
// joinLeaveEnd marks the end of this join/leave operation and
|
|
|
|
// signals the same without race to other join and leave waiters
|
2023-01-12 01:10:09 +00:00
|
|
|
func (sb *Sandbox) joinLeaveEnd() {
|
2023-01-12 00:49:40 +00:00
|
|
|
sb.mu.Lock()
|
|
|
|
defer sb.mu.Unlock()
|
2015-09-19 00:33:55 +00:00
|
|
|
|
|
|
|
if sb.joinLeaveDone != nil {
|
|
|
|
close(sb.joinLeaveDone)
|
|
|
|
sb.joinLeaveDone = nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-28 14:15:55 +00:00
|
|
|
// <=> Returns true if a < b, false if a > b and advances to next level if a == b
// epi.prio <=> epj.prio # 2 < 1
// epi.gw <=> epj.gw # non-gw < gw
// epi.internal <=> epj.internal # non-internal < internal
// epi.joininfo <=> epj.joininfo # ipv6 < ipv4
// epi.name <=> epj.name # bar < foo
func (epi *Endpoint) Less(epj *Endpoint) bool {
	var prioi, prioj int

	sbi, _ := epi.getSandbox()
	sbj, _ := epj.getSandbox()

	// Prio defaults to 0
	if sbi != nil {
		prioi = sbi.epPriority[epi.ID()]
	}
	if sbj != nil {
		prioj = sbj.epPriority[epj.ID()]
	}

	// Higher priority sorts first.
	if prioi != prioj {
		return prioi > prioj
	}

	// Non-gateway-network endpoints sort before gateway-network ones.
	gwi := epi.endpointInGWNetwork()
	gwj := epj.endpointInGWNetwork()
	if gwi != gwj {
		return gwj
	}

	// Non-internal networks sort before internal ones.
	inti := epi.getNetwork().Internal()
	intj := epj.getNetwork().Internal()
	if inti != intj {
		return intj
	}

	// Score joinInfo: +1 for an IPv4 gateway, +2 for an IPv6 gateway; the
	// higher score sorts first.
	jii := 0
	if epi.joinInfo != nil {
		if epi.joinInfo.gw != nil {
			jii = jii + 1
		}
		if epi.joinInfo.gw6 != nil {
			jii = jii + 2
		}
	}

	jij := 0
	if epj.joinInfo != nil {
		if epj.joinInfo.gw != nil {
			jij = jij + 1
		}
		if epj.joinInfo.gw6 != nil {
			jij = jij + 2
		}
	}

	if jii != jij {
		return jii > jij
	}

	// Final tie-break: lexicographic network name.
	return epi.network.Name() < epj.network.Name()
}
|
2016-09-19 22:48:06 +00:00
|
|
|
|
2023-01-12 01:10:09 +00:00
|
|
|
// NdotsSet reports whether a custom ndots resolv.conf option was configured
// for this sandbox. Read without holding sb.mu — assumes ndotsSet is only
// written during option processing; TODO confirm.
func (sb *Sandbox) NdotsSet() bool {
	return sb.ndotsSet
}
|