2021-08-23 13:14:53 +00:00
|
|
|
//go:build linux || windows
|
2016-10-12 23:55:20 +00:00
|
|
|
// +build linux windows
|
|
|
|
|
|
|
|
package libnetwork
|
|
|
|
|
|
|
|
import (
|
|
|
|
"net"
|
|
|
|
|
2017-07-26 21:18:31 +00:00
|
|
|
"github.com/sirupsen/logrus"
|
2016-10-12 23:55:20 +00:00
|
|
|
)
|
|
|
|
|
2018-02-23 23:24:47 +00:00
|
|
|
const maxSetStringLen = 350
|
|
|
|
|
2023-01-11 22:43:32 +00:00
|
|
|
func (c *Controller) addEndpointNameResolution(svcName, svcID, nID, eID, containerName string, vip net.IP, serviceAliases, taskAliases []string, ip net.IP, addService bool, method string) error {
|
2017-06-06 23:04:50 +00:00
|
|
|
n, err := c.NetworkByID(nID)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-06-18 12:25:58 +00:00
|
|
|
logrus.Debugf("addEndpointNameResolution %s %s add_service:%t sAliases:%v tAliases:%v", eID, svcName, addService, serviceAliases, taskAliases)
|
2017-06-06 23:04:50 +00:00
|
|
|
|
|
|
|
// Add container resolution mappings
|
2021-05-28 00:15:56 +00:00
|
|
|
if err := c.addContainerNameResolution(nID, eID, containerName, taskAliases, ip, method); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-06-06 23:04:50 +00:00
|
|
|
|
2017-06-18 12:25:58 +00:00
|
|
|
serviceID := svcID
|
|
|
|
if serviceID == "" {
|
|
|
|
// This is the case of a normal container not part of a service
|
|
|
|
serviceID = eID
|
|
|
|
}
|
|
|
|
|
2017-06-06 23:04:50 +00:00
|
|
|
// Add endpoint IP to special "tasks.svc_name" so that the applications have access to DNS RR.
|
2017-06-18 12:25:58 +00:00
|
|
|
n.(*network).addSvcRecords(eID, "tasks."+svcName, serviceID, ip, nil, false, method)
|
2017-06-06 23:04:50 +00:00
|
|
|
for _, alias := range serviceAliases {
|
2017-06-18 12:25:58 +00:00
|
|
|
n.(*network).addSvcRecords(eID, "tasks."+alias, serviceID, ip, nil, false, method)
|
2017-06-06 23:04:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Add service name to vip in DNS, if vip is valid. Otherwise resort to DNS RR
|
|
|
|
if len(vip) == 0 {
|
2017-06-18 12:25:58 +00:00
|
|
|
n.(*network).addSvcRecords(eID, svcName, serviceID, ip, nil, false, method)
|
2017-06-06 23:04:50 +00:00
|
|
|
for _, alias := range serviceAliases {
|
2017-06-18 12:25:58 +00:00
|
|
|
n.(*network).addSvcRecords(eID, alias, serviceID, ip, nil, false, method)
|
2017-06-06 23:04:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if addService && len(vip) != 0 {
|
2017-06-18 12:25:58 +00:00
|
|
|
n.(*network).addSvcRecords(eID, svcName, serviceID, vip, nil, false, method)
|
2017-06-06 23:04:50 +00:00
|
|
|
for _, alias := range serviceAliases {
|
2017-06-18 12:25:58 +00:00
|
|
|
n.(*network).addSvcRecords(eID, alias, serviceID, vip, nil, false, method)
|
2017-06-06 23:04:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-01-11 22:43:32 +00:00
|
|
|
func (c *Controller) addContainerNameResolution(nID, eID, containerName string, taskAliases []string, ip net.IP, method string) error {
|
2017-06-06 23:04:50 +00:00
|
|
|
n, err := c.NetworkByID(nID)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
logrus.Debugf("addContainerNameResolution %s %s", eID, containerName)
|
|
|
|
|
|
|
|
// Add resolution for container name
|
2017-06-18 12:25:58 +00:00
|
|
|
n.(*network).addSvcRecords(eID, containerName, eID, ip, nil, true, method)
|
2017-06-06 23:04:50 +00:00
|
|
|
|
|
|
|
// Add resolution for taskaliases
|
|
|
|
for _, alias := range taskAliases {
|
2018-11-10 18:53:06 +00:00
|
|
|
n.(*network).addSvcRecords(eID, alias, eID, ip, nil, false, method)
|
2017-06-06 23:04:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-01-11 22:43:32 +00:00
|
|
|
func (c *Controller) deleteEndpointNameResolution(svcName, svcID, nID, eID, containerName string, vip net.IP, serviceAliases, taskAliases []string, ip net.IP, rmService, multipleEntries bool, method string) error {
|
2017-06-06 23:04:50 +00:00
|
|
|
n, err := c.NetworkByID(nID)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-06-18 12:25:58 +00:00
|
|
|
logrus.Debugf("deleteEndpointNameResolution %s %s rm_service:%t suppress:%t sAliases:%v tAliases:%v", eID, svcName, rmService, multipleEntries, serviceAliases, taskAliases)
|
2017-06-06 23:04:50 +00:00
|
|
|
|
|
|
|
// Delete container resolution mappings
|
2021-05-28 00:15:56 +00:00
|
|
|
if err := c.delContainerNameResolution(nID, eID, containerName, taskAliases, ip, method); err != nil {
|
|
|
|
logrus.WithError(err).Warn("Error delting container from resolver")
|
|
|
|
}
|
2017-06-06 23:04:50 +00:00
|
|
|
|
2017-06-18 12:25:58 +00:00
|
|
|
serviceID := svcID
|
|
|
|
if serviceID == "" {
|
|
|
|
// This is the case of a normal container not part of a service
|
|
|
|
serviceID = eID
|
|
|
|
}
|
|
|
|
|
2017-06-06 23:04:50 +00:00
|
|
|
// Delete the special "tasks.svc_name" backend record.
|
|
|
|
if !multipleEntries {
|
2017-06-18 12:25:58 +00:00
|
|
|
n.(*network).deleteSvcRecords(eID, "tasks."+svcName, serviceID, ip, nil, false, method)
|
2017-06-06 23:04:50 +00:00
|
|
|
for _, alias := range serviceAliases {
|
2017-06-18 12:25:58 +00:00
|
|
|
n.(*network).deleteSvcRecords(eID, "tasks."+alias, serviceID, ip, nil, false, method)
|
2017-06-06 23:04:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we are doing DNS RR delete the endpoint IP from DNS record right away.
|
|
|
|
if !multipleEntries && len(vip) == 0 {
|
2017-06-18 12:25:58 +00:00
|
|
|
n.(*network).deleteSvcRecords(eID, svcName, serviceID, ip, nil, false, method)
|
2017-06-06 23:04:50 +00:00
|
|
|
for _, alias := range serviceAliases {
|
2017-06-18 12:25:58 +00:00
|
|
|
n.(*network).deleteSvcRecords(eID, alias, serviceID, ip, nil, false, method)
|
2017-06-06 23:04:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove the DNS record for VIP only if we are removing the service
|
|
|
|
if rmService && len(vip) != 0 && !multipleEntries {
|
2017-06-18 12:25:58 +00:00
|
|
|
n.(*network).deleteSvcRecords(eID, svcName, serviceID, vip, nil, false, method)
|
2017-06-06 23:04:50 +00:00
|
|
|
for _, alias := range serviceAliases {
|
2017-06-18 12:25:58 +00:00
|
|
|
n.(*network).deleteSvcRecords(eID, alias, serviceID, vip, nil, false, method)
|
2017-06-06 23:04:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-01-11 22:43:32 +00:00
|
|
|
// delContainerNameResolution removes the DNS records that
// addContainerNameResolution registered for the container name and its task
// aliases on network nID. method tags the log output with the caller's name.
func (c *Controller) delContainerNameResolution(nID, eID, containerName string, taskAliases []string, ip net.IP, method string) error {
	n, err := c.NetworkByID(nID)
	if err != nil {
		return err
	}
	logrus.Debugf("delContainerNameResolution %s %s", eID, containerName)

	// Delete resolution for container name
	n.(*network).deleteSvcRecords(eID, containerName, eID, ip, nil, true, method)

	// Delete resolution for taskaliases
	// NOTE(review): the sixth argument here is true, while
	// addContainerNameResolution adds the same alias records with false —
	// confirm this asymmetry is intentional for deleteSvcRecords.
	for _, alias := range taskAliases {
		n.(*network).deleteSvcRecords(eID, alias, eID, ip, nil, true, method)
	}

	return nil
}
|
|
|
|
|
|
|
|
func newService(name string, id string, ingressPorts []*PortConfig, serviceAliases []string) *service {
|
2016-10-12 23:55:20 +00:00
|
|
|
return &service{
|
|
|
|
name: name,
|
|
|
|
id: id,
|
|
|
|
ingressPorts: ingressPorts,
|
|
|
|
loadBalancers: make(map[string]*loadBalancer),
|
2017-06-06 23:04:50 +00:00
|
|
|
aliases: serviceAliases,
|
2016-10-12 23:55:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-11 22:43:32 +00:00
|
|
|
func (c *Controller) getLBIndex(sid, nid string, ingressPorts []*PortConfig) int {
|
2017-03-02 07:57:37 +00:00
|
|
|
skey := serviceKey{
|
|
|
|
id: sid,
|
|
|
|
ports: portConfigs(ingressPorts).String(),
|
|
|
|
}
|
2023-01-11 20:56:50 +00:00
|
|
|
c.mu.Lock()
|
2017-03-02 07:57:37 +00:00
|
|
|
s, ok := c.serviceBindings[skey]
|
2023-01-11 20:56:50 +00:00
|
|
|
c.mu.Unlock()
|
2017-03-02 07:57:37 +00:00
|
|
|
|
|
|
|
if !ok {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
s.Lock()
|
|
|
|
lb := s.loadBalancers[nid]
|
|
|
|
s.Unlock()
|
|
|
|
|
|
|
|
return int(lb.fwMark)
|
|
|
|
}
|
|
|
|
|
2017-10-13 04:41:29 +00:00
|
|
|
// cleanupServiceDiscovery when the network is being deleted, erase all the associated service discovery records
|
2023-01-11 22:43:32 +00:00
|
|
|
func (c *Controller) cleanupServiceDiscovery(cleanupNID string) {
|
2023-01-11 20:56:50 +00:00
|
|
|
c.mu.Lock()
|
|
|
|
defer c.mu.Unlock()
|
2017-10-13 04:41:29 +00:00
|
|
|
if cleanupNID == "" {
|
|
|
|
logrus.Debugf("cleanupServiceDiscovery for all networks")
|
2023-03-29 17:31:12 +00:00
|
|
|
c.svcRecords = make(map[string]*svcInfo)
|
2017-10-13 04:41:29 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
logrus.Debugf("cleanupServiceDiscovery for network:%s", cleanupNID)
|
|
|
|
delete(c.svcRecords, cleanupNID)
|
|
|
|
}
|
|
|
|
|
2023-01-11 22:43:32 +00:00
|
|
|
// cleanupServiceBindings removes the service bindings (load-balancer backends
// and the endpoint-to-IP bookkeeping) for network cleanupNID; an empty ID
// cleans up the bindings of every network. The actual removal is deferred to
// closures run after all locks are released, because rmServiceBinding
// re-acquires c.mu and the per-service locks itself.
func (c *Controller) cleanupServiceBindings(cleanupNID string) {
	var cleanupFuncs []func()

	logrus.Debugf("cleanupServiceBindings for %s", cleanupNID)
	c.mu.Lock()
	// Snapshot the current services under c.mu, then release it before
	// touching the per-service locks (consistent lock ordering).
	services := make([]*service, 0, len(c.serviceBindings))
	for _, s := range c.serviceBindings {
		services = append(services, s)
	}
	c.mu.Unlock()

	for _, s := range services {
		s.Lock()
		// Skip the serviceBindings that got deleted
		if s.deleted {
			s.Unlock()
			continue
		}
		for nid, lb := range s.loadBalancers {
			// When a specific network is requested, ignore the others.
			if cleanupNID != "" && nid != cleanupNID {
				continue
			}
			// One cleanup closure per backend endpoint of this load balancer.
			for eid, be := range lb.backEnds {
				cleanupFuncs = append(cleanupFuncs, makeServiceCleanupFunc(c, s, nid, eid, lb.vip, be.ip))
			}
		}
		s.Unlock()
	}

	// Execute the collected removals with no locks held.
	for _, f := range cleanupFuncs {
		f()
	}
}
|
|
|
|
|
2023-01-11 22:43:32 +00:00
|
|
|
func makeServiceCleanupFunc(c *Controller, s *service, nID, eID string, vip net.IP, ip net.IP) func() {
|
Gracefully remove LB endpoints from services
This patch attempts to allow endpoints to complete servicing connections
while being removed from a service. The change adds a flag to the
endpoint.deleteServiceInfoFromCluster() method to indicate whether this
removal should fully remove connectivity through the load balancer
to the endpoint or should just disable directing further connections to
the endpoint. If the flag is 'false', then the load balancer assigns
a weight of 0 to the endpoint but does not remove it as a linux load
balancing destination. It does remove the endpoint as a docker load
balancing endpoint but tracks it in a special map of "disabled-but-not-
destroyed" load balancing endpoints. This allows traffic to continue
flowing, at least under Linux. If the flag is 'true', then the code
removes the endpoint entirely as a load balancing destination.
The sandbox.DisableService() method invokes deleteServiceInfoFromCluster()
with the flag sent to 'false', while the endpoint.sbLeave() method invokes
it with the flag set to 'true' to complete the removal on endpoint
finalization. Renaming the endpoint invokes deleteServiceInfoFromCluster()
with the flag set to 'true' because renaming attempts to completely
remove and then re-add each endpoint service entry.
The controller.rmServiceBinding() method, which carries out the operation,
similarly gets a new flag for whether to fully remove the endpoint. If
the flag is false, it does the job of moving the endpoint from the
load balancing set to the 'disabled' set. It then removes or
de-weights the entry in the OS load balancing table via
network.rmLBBackend(). It removes the service entirely via said method
ONLY IF there are no more live or disabled load balancing endpoints.
Similarly network.addLBBackend() requires slight tweaking to properly
manage the disabled set.
Finally, this change requires propagating the status of disabled
service endpoints via the networkDB. Accordingly, the patch includes
both code to generate and handle service update messages. It also
augments the service structure with a ServiceDisabled boolean to convey
whether an endpoint should ultimately be removed or just disabled.
This, naturally, required a rebuild of the protocol buffer code as well.
Signed-off-by: Chris Telfer <ctelfer@docker.com>
2018-02-14 22:04:23 +00:00
|
|
|
// ContainerName and taskAliases are not available here, this is still fine because the Service discovery
|
|
|
|
// cleanup already happened before. The only thing that rmServiceBinding is still doing here a part from the Load
|
|
|
|
// Balancer bookeeping, is to keep consistent the mapping of endpoint to IP.
|
|
|
|
return func() {
|
|
|
|
if err := c.rmServiceBinding(s.name, s.id, nID, eID, "", vip, s.ingressPorts, s.aliases, []string{}, ip, "cleanupServiceBindings", false, true); err != nil {
|
|
|
|
logrus.Errorf("Failed to remove service bindings for service %s network %s endpoint %s while cleanup: %v", s.id, nID, eID, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-11 22:43:32 +00:00
|
|
|
// addServiceBinding registers endpoint eID (backend IP ip) as a backend of
// service svcID on network nID: it creates the service object and the
// per-network load balancer on first sight, records the backend, programs the
// load balancer into the network, and adds the DNS name resolutions. method
// tags the log output with the caller's name.
func (c *Controller) addServiceBinding(svcName, svcID, nID, eID, containerName string, vip net.IP, ingressPorts []*PortConfig, serviceAliases, taskAliases []string, ip net.IP, method string) error {
	var addService bool

	// Failure to lock the network ID on add can result in racing
	// against network deletion, leaving inconsistent state in the
	// c.serviceBindings map and its sub-maps. Also, always lock the
	// network ID before services to avoid deadlock.
	c.networkLocker.Lock(nID)
	defer c.networkLocker.Unlock(nID) //nolint:errcheck

	n, err := c.NetworkByID(nID)
	if err != nil {
		return err
	}

	skey := serviceKey{
		id:    svcID,
		ports: portConfigs(ingressPorts).String(),
	}

	// Look up (or create) the service object, retrying if we raced with a
	// concurrent deletion: a binding marked s.deleted must not be reused, so
	// loop until we hold the lock of a live object.
	var s *service
	for {
		c.mu.Lock()
		var ok bool
		s, ok = c.serviceBindings[skey]
		if !ok {
			// Create a new service if we are seeing this service
			// for the first time.
			s = newService(svcName, svcID, ingressPorts, serviceAliases)
			c.serviceBindings[skey] = s
		}
		c.mu.Unlock()
		s.Lock()
		if !s.deleted {
			// ok the object is good to be used
			break
		}
		s.Unlock()
	}
	logrus.Debugf("addServiceBinding from %s START for %s %s p:%p nid:%s skey:%v", method, svcName, eID, s, nID, skey)
	defer s.Unlock()

	lb, ok := s.loadBalancers[nID]
	if !ok {
		// Create a new load balancer if we are seeing this
		// network attachment on the service for the first
		// time. The firewall-mark counter is guarded by its own mutex.
		fwMarkCtrMu.Lock()

		lb = &loadBalancer{
			vip:      vip,
			fwMark:   fwMarkCtr,
			backEnds: make(map[string]*lbBackend),
			service:  s,
		}

		fwMarkCtr++
		fwMarkCtrMu.Unlock()

		s.loadBalancers[nID] = lb
		// Signal addEndpointNameResolution that the service-name records
		// still need to be created for this network.
		addService = true
	}

	lb.backEnds[eID] = &lbBackend{ip, false}

	// Track the IP-to-endpoint mapping; more than one entry for the same IP
	// indicates a (possibly transient) inconsistency, logged truncated to
	// maxSetStringLen.
	ok, entries := s.assignIPToEndpoint(ip.String(), eID)
	if !ok || entries > 1 {
		setStr, b := s.printIPToEndpoint(ip.String())
		if len(setStr) > maxSetStringLen {
			setStr = setStr[:maxSetStringLen]
		}
		logrus.Warnf("addServiceBinding %s possible transient state ok:%t entries:%d set:%t %s", eID, ok, entries, b, setStr)
	}

	// Add loadbalancer service and backend to the network
	n.(*network).addLBBackend(ip, lb)

	// Add the appropriate name resolutions
	if err := c.addEndpointNameResolution(svcName, svcID, nID, eID, containerName, vip, serviceAliases, taskAliases, ip, addService, "addServiceBinding"); err != nil {
		return err
	}

	logrus.Debugf("addServiceBinding from %s END for %s %s", method, svcName, eID)

	return nil
}
|
|
|
|
|
2023-01-11 22:43:32 +00:00
|
|
|
func (c *Controller) rmServiceBinding(svcName, svcID, nID, eID, containerName string, vip net.IP, ingressPorts []*PortConfig, serviceAliases []string, taskAliases []string, ip net.IP, method string, deleteSvcRecords bool, fullRemove bool) error {
|
2016-10-12 23:55:20 +00:00
|
|
|
var rmService bool
|
|
|
|
|
|
|
|
skey := serviceKey{
|
2017-06-06 23:04:50 +00:00
|
|
|
id: svcID,
|
2016-10-12 23:55:20 +00:00
|
|
|
ports: portConfigs(ingressPorts).String(),
|
|
|
|
}
|
|
|
|
|
2023-01-11 20:56:50 +00:00
|
|
|
c.mu.Lock()
|
2016-10-12 23:55:20 +00:00
|
|
|
s, ok := c.serviceBindings[skey]
|
2023-01-11 20:56:50 +00:00
|
|
|
c.mu.Unlock()
|
2016-10-12 23:55:20 +00:00
|
|
|
if !ok {
|
2017-06-06 23:04:50 +00:00
|
|
|
logrus.Warnf("rmServiceBinding %s %s %s aborted c.serviceBindings[skey] !ok", method, svcName, eID)
|
2016-10-12 23:55:20 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
s.Lock()
|
2017-06-06 23:04:50 +00:00
|
|
|
defer s.Unlock()
|
2017-06-18 12:25:58 +00:00
|
|
|
logrus.Debugf("rmServiceBinding from %s START for %s %s p:%p nid:%s sKey:%v deleteSvc:%t", method, svcName, eID, s, nID, skey, deleteSvcRecords)
|
2017-06-06 23:04:50 +00:00
|
|
|
lb, ok := s.loadBalancers[nID]
|
2016-10-12 23:55:20 +00:00
|
|
|
if !ok {
|
2017-06-06 23:04:50 +00:00
|
|
|
logrus.Warnf("rmServiceBinding %s %s %s aborted s.loadBalancers[nid] !ok", method, svcName, eID)
|
2016-10-12 23:55:20 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
Gracefully remove LB endpoints from services
This patch attempts to allow endpoints to complete servicing connections
while being removed from a service. The change adds a flag to the
endpoint.deleteServiceInfoFromCluster() method to indicate whether this
removal should fully remove connectivity through the load balancer
to the endpoint or should just disable directing further connections to
the endpoint. If the flag is 'false', then the load balancer assigns
a weight of 0 to the endpoint but does not remove it as a linux load
balancing destination. It does remove the endpoint as a docker load
balancing endpoint but tracks it in a special map of "disabled-but-not-
destroyed" load balancing endpoints. This allows traffic to continue
flowing, at least under Linux. If the flag is 'true', then the code
removes the endpoint entirely as a load balancing destination.
The sandbox.DisableService() method invokes deleteServiceInfoFromCluster()
with the flag sent to 'false', while the endpoint.sbLeave() method invokes
it with the flag set to 'true' to complete the removal on endpoint
finalization. Renaming the endpoint invokes deleteServiceInfoFromCluster()
with the flag set to 'true' because renaming attempts to completely
remove and then re-add each endpoint service entry.
The controller.rmServiceBinding() method, which carries out the operation,
similarly gets a new flag for whether to fully remove the endpoint. If
the flag is false, it does the job of moving the endpoint from the
load balancing set to the 'disabled' set. It then removes or
de-weights the entry in the OS load balancing table via
network.rmLBBackend(). It removes the service entirely via said method
ONLY IF there are no more live or disabled load balancing endpoints.
Similarly network.addLBBackend() requires slight tweaking to properly
manage the disabled set.
Finally, this change requires propagating the status of disabled
service endpoints via the networkDB. Accordingly, the patch includes
both code to generate and handle service update messages. It also
augments the service structure with a ServiceDisabled boolean to convey
whether an endpoint should ultimately be removed or just disabled.
This, naturally, required a rebuild of the protocol buffer code as well.
Signed-off-by: Chris Telfer <ctelfer@docker.com>
2018-02-14 22:04:23 +00:00
|
|
|
be, ok := lb.backEnds[eID]
|
2016-10-12 23:55:20 +00:00
|
|
|
if !ok {
|
Gracefully remove LB endpoints from services
This patch attempts to allow endpoints to complete servicing connections
while being removed from a service. The change adds a flag to the
endpoint.deleteServiceInfoFromCluster() method to indicate whether this
removal should fully remove connectivity through the load balancer
to the endpoint or should just disable directing further connections to
the endpoint. If the flag is 'false', then the load balancer assigns
a weight of 0 to the endpoint but does not remove it as a linux load
balancing destination. It does remove the endpoint as a docker load
balancing endpoint but tracks it in a special map of "disabled-but-not-
destroyed" load balancing endpoints. This allows traffic to continue
flowing, at least under Linux. If the flag is 'true', then the code
removes the endpoint entirely as a load balancing destination.
The sandbox.DisableService() method invokes deleteServiceInfoFromCluster()
with the flag sent to 'false', while the endpoint.sbLeave() method invokes
it with the flag set to 'true' to complete the removal on endpoint
finalization. Renaming the endpoint invokes deleteServiceInfoFromCluster()
with the flag set to 'true' because renaming attempts to completely
remove and then re-add each endpoint service entry.
The controller.rmServiceBinding() method, which carries out the operation,
similarly gets a new flag for whether to fully remove the endpoint. If
the flag is false, it does the job of moving the endpoint from the
load balancing set to the 'disabled' set. It then removes or
de-weights the entry in the OS load balancing table via
network.rmLBBackend(). It removes the service entirely via said method
ONLY IF there are no more live or disabled load balancing endpoints.
Similarly network.addLBBackend() requires slight tweaking to properly
manage the disabled set.
Finally, this change requires propagating the status of disabled
service endpoints via the networkDB. Accordingly, the patch includes
both code to generate and handle service update messages. It also
augments the service structure with a ServiceDisabled boolean to convey
whether an endpoint should ultimately be removed or just disabled.
This, naturally, required a rebuild of the protocol buffer code as well.
Signed-off-by: Chris Telfer <ctelfer@docker.com>
2018-02-14 22:04:23 +00:00
|
|
|
logrus.Warnf("rmServiceBinding %s %s %s aborted lb.backEnds[eid] && lb.disabled[eid] !ok", method, svcName, eID)
|
2016-10-12 23:55:20 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
Gracefully remove LB endpoints from services
This patch attempts to allow endpoints to complete servicing connections
while being removed from a service. The change adds a flag to the
endpoint.deleteServiceInfoFromCluster() method to indicate whether this
removal should fully remove connectivity through the load balancer
to the endpoint or should just disable directing further connections to
the endpoint. If the flag is 'false', then the load balancer assigns
a weight of 0 to the endpoint but does not remove it as a linux load
balancing destination. It does remove the endpoint as a docker load
balancing endpoint but tracks it in a special map of "disabled-but-not-
destroyed" load balancing endpoints. This allows traffic to continue
flowing, at least under Linux. If the flag is 'true', then the code
removes the endpoint entirely as a load balancing destination.
The sandbox.DisableService() method invokes deleteServiceInfoFromCluster()
with the flag sent to 'false', while the endpoint.sbLeave() method invokes
it with the flag set to 'true' to complete the removal on endpoint
finalization. Renaming the endpoint invokes deleteServiceInfoFromCluster()
with the flag set to 'true' because renaming attempts to completely
remove and then re-add each endpoint service entry.
The controller.rmServiceBinding() method, which carries out the operation,
similarly gets a new flag for whether to fully remove the endpoint. If
the flag is false, it does the job of moving the endpoint from the
load balancing set to the 'disabled' set. It then removes or
de-weights the entry in the OS load balancing table via
network.rmLBBackend(). It removes the service entirely via said method
ONLY IF there are no more live or disabled load balancing endpoints.
Similarly network.addLBBackend() requires slight tweaking to properly
manage the disabled set.
Finally, this change requires propagating the status of disabled
service endpoints via the networkDB. Accordingly, the patch includes
both code to generate and handle service update messages. It also
augments the service structure with a ServiceDisabled boolean to convey
whether an endpoint should ultimately be removed or just disabled.
This, naturally, required a rebuild of the protocol buffer code as well.
Signed-off-by: Chris Telfer <ctelfer@docker.com>
2018-02-14 22:04:23 +00:00
|
|
|
if fullRemove {
|
|
|
|
// delete regardless
|
|
|
|
delete(lb.backEnds, eID)
|
|
|
|
} else {
|
|
|
|
be.disabled = true
|
|
|
|
}
|
|
|
|
|
2016-10-12 23:55:20 +00:00
|
|
|
if len(lb.backEnds) == 0 {
|
|
|
|
// All the backends for this service have been
|
|
|
|
// removed. Time to remove the load balancer and also
|
|
|
|
// remove the service entry in IPVS.
|
|
|
|
rmService = true
|
|
|
|
|
2017-06-06 23:04:50 +00:00
|
|
|
delete(s.loadBalancers, nID)
|
2017-06-18 12:25:58 +00:00
|
|
|
logrus.Debugf("rmServiceBinding %s delete %s, p:%p in loadbalancers len:%d", eID, nID, lb, len(s.loadBalancers))
|
2016-10-12 23:55:20 +00:00
|
|
|
}
|
|
|
|
|
2017-06-06 23:04:50 +00:00
|
|
|
ok, entries := s.removeIPToEndpoint(ip.String(), eID)
|
|
|
|
if !ok || entries > 0 {
|
|
|
|
setStr, b := s.printIPToEndpoint(ip.String())
|
2018-03-01 17:24:36 +00:00
|
|
|
if len(setStr) > maxSetStringLen {
|
|
|
|
setStr = setStr[:maxSetStringLen]
|
|
|
|
}
|
|
|
|
logrus.Warnf("rmServiceBinding %s possible transient state ok:%t entries:%d set:%t %s", eID, ok, entries, b, setStr)
|
2017-06-06 23:04:50 +00:00
|
|
|
}
|
|
|
|
|
2016-10-12 23:55:20 +00:00
|
|
|
// Remove loadbalancer service(if needed) and backend in all
|
|
|
|
// sandboxes in the network only if the vip is valid.
|
2018-04-10 04:36:19 +00:00
|
|
|
if entries == 0 {
|
2021-02-04 08:21:45 +00:00
|
|
|
// The network may well have been deleted from the store (and
|
|
|
|
// dataplane) before the last of the service bindings. On Linux that's
|
|
|
|
// ok because removing the network sandbox from the dataplane
|
|
|
|
// implicitly cleans up all related dataplane state.
|
|
|
|
// On the Windows dataplane, VFP policylists must be removed
|
|
|
|
// independently of the network, and they must be removed before the HNS
|
|
|
|
// network. Otherwise, policylist removal fails with "network not
|
|
|
|
// found." On Windows cleanupServiceBindings must be called prior to
|
|
|
|
// removing the network from the store or dataplane.
|
2018-04-10 16:34:41 +00:00
|
|
|
n, err := c.NetworkByID(nID)
|
|
|
|
if err == nil {
|
|
|
|
n.(*network).rmLBBackend(ip, lb, rmService, fullRemove)
|
|
|
|
}
|
2016-10-12 23:55:20 +00:00
|
|
|
}
|
|
|
|
|
2017-06-06 23:04:50 +00:00
|
|
|
// Delete the name resolutions
|
2017-06-18 12:25:58 +00:00
|
|
|
if deleteSvcRecords {
|
2021-05-28 00:15:56 +00:00
|
|
|
if err := c.deleteEndpointNameResolution(svcName, svcID, nID, eID, containerName, vip, serviceAliases, taskAliases, ip, rmService, entries > 0, "rmServiceBinding"); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-06-18 12:25:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if len(s.loadBalancers) == 0 {
|
|
|
|
// All loadbalancers for the service removed. Time to
|
|
|
|
// remove the service itself.
|
2023-01-11 20:56:50 +00:00
|
|
|
c.mu.Lock()
|
2017-06-18 12:25:58 +00:00
|
|
|
|
|
|
|
// Mark the object as deleted so that the add won't use it wrongly
|
|
|
|
s.deleted = true
|
|
|
|
// NOTE The delete from the serviceBindings map has to be the last operation else we are allowing a race between this service
|
|
|
|
// that is getting deleted and a new service that will be created if the entry is not anymore there
|
|
|
|
delete(c.serviceBindings, skey)
|
2023-01-11 20:56:50 +00:00
|
|
|
c.mu.Unlock()
|
2017-06-18 12:25:58 +00:00
|
|
|
}
|
2016-10-12 23:55:20 +00:00
|
|
|
|
2017-06-06 23:04:50 +00:00
|
|
|
logrus.Debugf("rmServiceBinding from %s END for %s %s", method, svcName, eID)
|
2016-10-12 23:55:20 +00:00
|
|
|
return nil
|
|
|
|
}
|