Bump libnetwork to 3ac297bc

Bump libnetwork to 3ac297bc7fd0afec9051bbb47024c9bc1d75bf5b in order to
pick up fix 0c3d9f00, which addresses a flaw that the scalable load
balancing code revealed.  Attempting to print a sandbox ID by slicing
the first seven characters of a name that was too short caused a
goroutine panic.  In the previous code this could occur for sandboxes
with names of 1 or 2 characters, but due to naming updates in the
scalable load balancing code it could now also occur for networks whose
names were 3 characters long, and at least one of the integration tests
employed such networks (named 'foo', 'bar' and 'baz').
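
For context, a minimal sketch of the failure mode and of the fix (the
name "foo" stands in for one of the short test network names):

```go
package main

import "fmt"

func main() {
	id := "foo" // a 3-character network name, as in the integration tests

	// Old pattern: slicing assumes the string has at least 7 characters
	// and panics otherwise with "slice bounds out of range".
	// fmt.Println(id[0:7])

	// New pattern (fix 0c3d9f00): "%.7s" truncates to at most 7
	// characters and is safe for any length.
	fmt.Printf("%.7s\n", id) // prints "foo"
}
```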

This update also brings in the following changes:
 * 6c7c6017 - Fix error handling about bridgeSetup
 * 5ed38221 - Optimize networkDB queue
 * cfa9afdb - ndots: produce error on negative numbers
 * 5586e226 - improve error message for invalid ndots number
 * 449672e5 - Allows to set generic knobs on the Sandbox
 * 6b4c4af7 - do not ignore user-provided "ndots:0" option
 * 843a0e42 - Adjust corner case for reconnect logic

Signed-off-by: Chris Telfer <ctelfer@docker.com>
Chris Telfer 2018-07-05 22:54:25 -04:00
parent 8e0f6bc903
commit 0e162d9923
42 changed files with 338 additions and 166 deletions


@ -3,7 +3,7 @@
# LIBNETWORK_COMMIT is used to build the docker-userland-proxy binary. When
# updating the binary version, consider updating github.com/docker/libnetwork
# in vendor.conf accordingly
LIBNETWORK_COMMIT=b0186632522c68f4e1222c4f6d7dbe518882024f
LIBNETWORK_COMMIT=3ac297bc7fd0afec9051bbb47024c9bc1d75bf5b
install_proxy() {
case "$1" in


@ -37,7 +37,7 @@ github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
#get libnetwork packages
# When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy accordingly
github.com/docker/libnetwork b0186632522c68f4e1222c4f6d7dbe518882024f
github.com/docker/libnetwork 3ac297bc7fd0afec9051bbb47024c9bc1d75bf5b
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec


@ -1144,6 +1144,11 @@ func (c *controller) NewSandbox(containerID string, options ...SandboxOption) (S
}
}
if sb.osSbox != nil {
// Apply operating system specific knobs on the load balancer sandbox
sb.osSbox.ApplyOSTweaks(sb.oslTypes)
}
c.Lock()
c.sandboxes[sb.id] = sb
c.Unlock()


@ -120,3 +120,13 @@ type TablePeersResult struct {
TableObj
Elements []PeerEntryObj `json:"entries"`
}
// NetworkStatsResult network db stats related to entries and queue len for a network
type NetworkStatsResult struct {
Entries int `json:"entries"`
QueueLen int `json:"qlen"`
}
func (n *NetworkStatsResult) String() string {
return fmt.Sprintf("entries: %d, qlen: %d\n", n.Entries, n.QueueLen)
}


@ -614,9 +614,7 @@ func (d *driver) checkConflict(config *networkConfiguration) error {
return nil
}
func (d *driver) createNetwork(config *networkConfiguration) error {
var err error
func (d *driver) createNetwork(config *networkConfiguration) (err error) {
defer osl.InitOSContext()()
networkList := d.getNetworks()
@ -775,7 +773,7 @@ func (d *driver) deleteNetwork(nid string) error {
}
if err := d.storeDelete(ep); err != nil {
logrus.Warnf("Failed to remove bridge endpoint %s from store: %v", ep.id[0:7], err)
logrus.Warnf("Failed to remove bridge endpoint %.7s from store: %v", ep.id, err)
}
}
@ -1050,7 +1048,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
}
if err = d.storeUpdate(endpoint); err != nil {
return fmt.Errorf("failed to save bridge endpoint %s to store: %v", endpoint.id[0:7], err)
return fmt.Errorf("failed to save bridge endpoint %.7s to store: %v", endpoint.id, err)
}
return nil
@ -1116,7 +1114,7 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {
}
if err := d.storeDelete(ep); err != nil {
logrus.Warnf("Failed to remove bridge endpoint %s from store: %v", ep.id[0:7], err)
logrus.Warnf("Failed to remove bridge endpoint %.7s from store: %v", ep.id, err)
}
return nil
@ -1290,7 +1288,7 @@ func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string
}()
if err = d.storeUpdate(endpoint); err != nil {
return fmt.Errorf("failed to update bridge endpoint %s to store: %v", endpoint.id[0:7], err)
return fmt.Errorf("failed to update bridge endpoint %.7s to store: %v", endpoint.id, err)
}
if !network.config.EnableICC {
@ -1332,7 +1330,7 @@ func (d *driver) RevokeExternalConnectivity(nid, eid string) error {
clearEndpointConnections(d.nlh, endpoint)
if err = d.storeUpdate(endpoint); err != nil {
return fmt.Errorf("failed to update bridge endpoint %s to store: %v", endpoint.id[0:7], err)
return fmt.Errorf("failed to update bridge endpoint %.7s to store: %v", endpoint.id, err)
}
return nil


@ -62,7 +62,7 @@ func (d *driver) populateNetworks() error {
if err = d.createNetwork(ncfg); err != nil {
logrus.Warnf("could not create bridge network for id %s bridge name %s while booting up from persistent state: %v", ncfg.ID, ncfg.BridgeName, err)
}
logrus.Debugf("Network (%s) restored", ncfg.ID[0:7])
logrus.Debugf("Network (%.7s) restored", ncfg.ID)
}
return nil
@ -82,16 +82,16 @@ func (d *driver) populateEndpoints() error {
ep := kvo.(*bridgeEndpoint)
n, ok := d.networks[ep.nid]
if !ok {
logrus.Debugf("Network (%s) not found for restored bridge endpoint (%s)", ep.nid[0:7], ep.id[0:7])
logrus.Debugf("Deleting stale bridge endpoint (%s) from store", ep.id[0:7])
logrus.Debugf("Network (%.7s) not found for restored bridge endpoint (%.7s)", ep.nid, ep.id)
logrus.Debugf("Deleting stale bridge endpoint (%.7s) from store", ep.id)
if err := d.storeDelete(ep); err != nil {
logrus.Debugf("Failed to delete stale bridge endpoint (%s) from store", ep.id[0:7])
logrus.Debugf("Failed to delete stale bridge endpoint (%.7s) from store", ep.id)
}
continue
}
n.endpoints[ep.id] = ep
n.restorePortAllocations(ep)
logrus.Debugf("Endpoint (%s) restored to network (%s)", ep.id[0:7], ep.nid[0:7])
logrus.Debugf("Endpoint (%.7s) restored to network (%.7s)", ep.id, ep.nid)
}
return nil
@ -382,7 +382,7 @@ func (n *bridgeNetwork) restorePortAllocations(ep *bridgeEndpoint) {
ep.extConnConfig.PortBindings = ep.portMapping
_, err := n.allocatePorts(ep, n.config.DefaultBindingIP, n.driver.config.EnableUserlandProxy)
if err != nil {
logrus.Warnf("Failed to reserve existing port mapping for endpoint %s:%v", ep.id[0:7], err)
logrus.Warnf("Failed to reserve existing port mapping for endpoint %.7s:%v", ep.id, err)
}
ep.extConnConfig.PortBindings = tmp
}


@ -53,7 +53,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
}
if err := d.storeUpdate(ep); err != nil {
return fmt.Errorf("failed to save ipvlan endpoint %s to store: %v", ep.id[0:7], err)
return fmt.Errorf("failed to save ipvlan endpoint %.7s to store: %v", ep.id, err)
}
n.addEndpoint(ep)
@ -82,7 +82,7 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {
}
if err := d.storeDelete(ep); err != nil {
logrus.Warnf("Failed to remove ipvlan endpoint %s from store: %v", ep.id[0:7], err)
logrus.Warnf("Failed to remove ipvlan endpoint %.7s from store: %v", ep.id, err)
}
n.deleteEndpoint(ep.id)
return nil


@ -117,7 +117,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
return err
}
if err = d.storeUpdate(ep); err != nil {
return fmt.Errorf("failed to save ipvlan endpoint %s to store: %v", ep.id[0:7], err)
return fmt.Errorf("failed to save ipvlan endpoint %.7s to store: %v", ep.id, err)
}
return nil


@ -156,7 +156,7 @@ func (d *driver) DeleteNetwork(nid string) error {
}
if err := d.storeDelete(ep); err != nil {
logrus.Warnf("Failed to remove ipvlan endpoint %s from store: %v", ep.id[0:7], err)
logrus.Warnf("Failed to remove ipvlan endpoint %.7s from store: %v", ep.id, err)
}
}
// delete the *network


@ -95,15 +95,15 @@ func (d *driver) populateEndpoints() error {
ep := kvo.(*endpoint)
n, ok := d.networks[ep.nid]
if !ok {
logrus.Debugf("Network (%s) not found for restored ipvlan endpoint (%s)", ep.nid[0:7], ep.id[0:7])
logrus.Debugf("Deleting stale ipvlan endpoint (%s) from store", ep.id[0:7])
logrus.Debugf("Network (%.7s) not found for restored ipvlan endpoint (%.7s)", ep.nid, ep.id)
logrus.Debugf("Deleting stale ipvlan endpoint (%.7s) from store", ep.id)
if err := d.storeDelete(ep); err != nil {
logrus.Debugf("Failed to delete stale ipvlan endpoint (%s) from store", ep.id[0:7])
logrus.Debugf("Failed to delete stale ipvlan endpoint (%.7s) from store", ep.id)
}
continue
}
n.endpoints[ep.id] = ep
logrus.Debugf("Endpoint (%s) restored to network (%s)", ep.id[0:7], ep.nid[0:7])
logrus.Debugf("Endpoint (%.7s) restored to network (%.7s)", ep.id, ep.nid)
}
return nil


@ -58,7 +58,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
}
if err := d.storeUpdate(ep); err != nil {
return fmt.Errorf("failed to save macvlan endpoint %s to store: %v", ep.id[0:7], err)
return fmt.Errorf("failed to save macvlan endpoint %.7s to store: %v", ep.id, err)
}
n.addEndpoint(ep)
@ -87,7 +87,7 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {
}
if err := d.storeDelete(ep); err != nil {
logrus.Warnf("Failed to remove macvlan endpoint %s from store: %v", ep.id[0:7], err)
logrus.Warnf("Failed to remove macvlan endpoint %.7s from store: %v", ep.id, err)
}
n.deleteEndpoint(ep.id)


@ -78,7 +78,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
return err
}
if err := d.storeUpdate(ep); err != nil {
return fmt.Errorf("failed to save macvlan endpoint %s to store: %v", ep.id[0:7], err)
return fmt.Errorf("failed to save macvlan endpoint %.7s to store: %v", ep.id, err)
}
return nil
}


@ -160,7 +160,7 @@ func (d *driver) DeleteNetwork(nid string) error {
}
if err := d.storeDelete(ep); err != nil {
logrus.Warnf("Failed to remove macvlan endpoint %s from store: %v", ep.id[0:7], err)
logrus.Warnf("Failed to remove macvlan endpoint %.7s from store: %v", ep.id, err)
}
}
// delete the *network


@ -95,15 +95,15 @@ func (d *driver) populateEndpoints() error {
ep := kvo.(*endpoint)
n, ok := d.networks[ep.nid]
if !ok {
logrus.Debugf("Network (%s) not found for restored macvlan endpoint (%s)", ep.nid[0:7], ep.id[0:7])
logrus.Debugf("Deleting stale macvlan endpoint (%s) from store", ep.id[0:7])
logrus.Debugf("Network (%.7s) not found for restored macvlan endpoint (%.7s)", ep.nid, ep.id)
logrus.Debugf("Deleting stale macvlan endpoint (%.7s) from store", ep.id)
if err := d.storeDelete(ep); err != nil {
logrus.Debugf("Failed to delete stale macvlan endpoint (%s) from store", ep.id[0:7])
logrus.Debugf("Failed to delete stale macvlan endpoint (%.7s) from store", ep.id)
}
continue
}
n.endpoints[ep.id] = ep
logrus.Debugf("Endpoint (%s) restored to network (%s)", ep.id[0:7], ep.nid[0:7])
logrus.Debugf("Endpoint (%.7s) restored to network (%.7s)", ep.id, ep.nid)
}
return nil


@ -78,7 +78,7 @@ func (e *encrMap) String() string {
}
func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal, add bool) error {
logrus.Debugf("checkEncryption(%s, %v, %d, %t)", nid[0:7], rIP, vxlanID, isLocal)
logrus.Debugf("checkEncryption(%.7s, %v, %d, %t)", nid, rIP, vxlanID, isLocal)
n := d.network(nid)
if n == nil || !n.secure {
@ -101,7 +101,7 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal
}
return false
}); err != nil {
logrus.Warnf("Failed to retrieve list of participating nodes in overlay network %s: %v", nid[0:5], err)
logrus.Warnf("Failed to retrieve list of participating nodes in overlay network %.5s: %v", nid, err)
}
default:
if len(d.network(nid).endpoints) > 0 {


@ -69,7 +69,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
ep.ifName = containerIfName
if err = d.writeEndpointToStore(ep); err != nil {
return fmt.Errorf("failed to update overlay endpoint %s to local data store: %v", ep.id[0:7], err)
return fmt.Errorf("failed to update overlay endpoint %.7s to local data store: %v", ep.id, err)
}
// Set the container interface and its peer MTU to 1450 to allow


@ -1,72 +1,23 @@
package overlay
import (
"io/ioutil"
"path"
"strconv"
"strings"
"github.com/sirupsen/logrus"
"github.com/docker/libnetwork/osl/kernel"
)
type conditionalCheck func(val1, val2 string) bool
type osValue struct {
value string
checkFn conditionalCheck
}
var osConfig = map[string]osValue{
var ovConfig = map[string]*kernel.OSValue{
"net.ipv4.neigh.default.gc_thresh1": {"8192", checkHigher},
"net.ipv4.neigh.default.gc_thresh2": {"49152", checkHigher},
"net.ipv4.neigh.default.gc_thresh3": {"65536", checkHigher},
}
func propertyIsValid(val1, val2 string, check conditionalCheck) bool {
if check == nil || check(val1, val2) {
return true
}
return false
}
func checkHigher(val1, val2 string) bool {
val1Int, _ := strconv.ParseInt(val1, 10, 32)
val2Int, _ := strconv.ParseInt(val2, 10, 32)
return val1Int < val2Int
}
// writeSystemProperty writes the value to a path under /proc/sys as determined from the key.
// e.g. net.ipv4.ip_forward translates to /proc/sys/net/ipv4/ip_forward.
func writeSystemProperty(key, value string) error {
keyPath := strings.Replace(key, ".", "/", -1)
return ioutil.WriteFile(path.Join("/proc/sys", keyPath), []byte(value), 0644)
}
func readSystemProperty(key string) (string, error) {
keyPath := strings.Replace(key, ".", "/", -1)
value, err := ioutil.ReadFile(path.Join("/proc/sys", keyPath))
if err != nil {
return "", err
}
return string(value), nil
}
func applyOStweaks() {
for k, v := range osConfig {
// read the existing property from disk
oldv, err := readSystemProperty(k)
if err != nil {
logrus.Errorf("error reading the kernel parameter %s, error: %s", k, err)
continue
}
if propertyIsValid(oldv, v.value, v.checkFn) {
// write new prop value to disk
if err := writeSystemProperty(k, v.value); err != nil {
logrus.Errorf("error setting the kernel parameter %s = %s, (leaving as %s) error: %s", k, v.value, oldv, err)
continue
}
logrus.Debugf("updated kernel parameter %s = %s (was %s)", k, v.value, oldv)
}
}
kernel.ApplyOSTweaks(ovConfig)
}


@ -90,7 +90,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
n.addEndpoint(ep)
if err := d.writeEndpointToStore(ep); err != nil {
return fmt.Errorf("failed to update overlay endpoint %s to local store: %v", ep.id[0:7], err)
return fmt.Errorf("failed to update overlay endpoint %.7s to local store: %v", ep.id, err)
}
return nil
@ -116,7 +116,7 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {
n.deleteEndpoint(eid)
if err := d.deleteEndpointFromStore(ep); err != nil {
logrus.Warnf("Failed to delete overlay endpoint %s from local store: %v", ep.id[0:7], err)
logrus.Warnf("Failed to delete overlay endpoint %.7s from local store: %v", ep.id, err)
}
if ep.ifName == "" {


@ -274,7 +274,7 @@ func (d *driver) DeleteNetwork(nid string) error {
}
if err := d.deleteEndpointFromStore(ep); err != nil {
logrus.Warnf("Failed to delete overlay endpoint %s from local store: %v", ep.id[0:7], err)
logrus.Warnf("Failed to delete overlay endpoint %.7s from local store: %v", ep.id, err)
}
}
// flush the peerDB entries


@ -137,10 +137,10 @@ func (d *driver) restoreEndpoints() error {
ep := kvo.(*endpoint)
n := d.network(ep.nid)
if n == nil {
logrus.Debugf("Network (%s) not found for restored endpoint (%s)", ep.nid[0:7], ep.id[0:7])
logrus.Debugf("Deleting stale overlay endpoint (%s) from store", ep.id[0:7])
logrus.Debugf("Network (%.7s) not found for restored endpoint (%.7s)", ep.nid, ep.id)
logrus.Debugf("Deleting stale overlay endpoint (%.7s) from store", ep.id)
if err := d.deleteEndpointFromStore(ep); err != nil {
logrus.Debugf("Failed to delete stale overlay endpoint (%s) from store", ep.id[0:7])
logrus.Debugf("Failed to delete stale overlay endpoint (%.7s) from store", ep.id)
}
continue
}


@ -80,7 +80,7 @@ func (n *network) removeEndpointWithAddress(addr *net.IPNet) {
_, err := hcsshim.HNSEndpointRequest("DELETE", networkEndpoint.profileID, "")
if err != nil {
logrus.Debugf("Failed to delete stale overlay endpoint (%s) from hns", networkEndpoint.id[0:7])
logrus.Debugf("Failed to delete stale overlay endpoint (%.7s) from hns", networkEndpoint.id)
}
}
}


@ -415,7 +415,7 @@ func (d *driver) DeleteNetwork(nid string) error {
// delete endpoints belonging to this network
for _, ep := range n.endpoints {
if err := d.storeDelete(ep); err != nil {
logrus.Warnf("Failed to remove bridge endpoint %s from store: %v", ep.id[0:7], err)
logrus.Warnf("Failed to remove bridge endpoint %.7s from store: %v", ep.id, err)
}
}
@ -704,7 +704,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
}
if err = d.storeUpdate(endpoint); err != nil {
logrus.Errorf("Failed to save endpoint %s to store: %v", endpoint.id[0:7], err)
logrus.Errorf("Failed to save endpoint %.7s to store: %v", endpoint.id, err)
}
return nil
@ -731,7 +731,7 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {
}
if err := d.storeDelete(ep); err != nil {
logrus.Warnf("Failed to remove bridge endpoint %s from store: %v", ep.id[0:7], err)
logrus.Warnf("Failed to remove bridge endpoint %.7s from store: %v", ep.id, err)
}
return nil
}


@ -64,7 +64,7 @@ func (d *driver) populateNetworks() error {
if err = d.createNetwork(ncfg); err != nil {
logrus.Warnf("could not create windows network for id %s hnsid %s while booting up from persistent state: %v", ncfg.ID, ncfg.HnsID, err)
}
logrus.Debugf("Network %v (%s) restored", d.name, ncfg.ID[0:7])
logrus.Debugf("Network %v (%.7s) restored", d.name, ncfg.ID)
}
return nil
@ -87,15 +87,15 @@ func (d *driver) populateEndpoints() error {
}
n, ok := d.networks[ep.nid]
if !ok {
logrus.Debugf("Network (%s) not found for restored endpoint (%s)", ep.nid[0:7], ep.id[0:7])
logrus.Debugf("Deleting stale endpoint (%s) from store", ep.id[0:7])
logrus.Debugf("Network (%.7s) not found for restored endpoint (%.7s)", ep.nid, ep.id)
logrus.Debugf("Deleting stale endpoint (%.7s) from store", ep.id)
if err := d.storeDelete(ep); err != nil {
logrus.Debugf("Failed to delete stale endpoint (%s) from store", ep.id[0:7])
logrus.Debugf("Failed to delete stale endpoint (%.7s) from store", ep.id)
}
continue
}
n.endpoints[ep.id] = ep
logrus.Debugf("Endpoint (%s) restored to network (%s)", ep.id[0:7], ep.nid[0:7])
logrus.Debugf("Endpoint (%.7s) restored to network (%.7s)", ep.id, ep.nid)
}
return nil


@ -203,6 +203,10 @@ func (a *Allocator) GetDefaultAddressSpaces() (string, string, error) {
}
// RequestPool returns an address pool along with its unique id.
// addressSpace must be a valid address space name and must not be the empty string.
// If pool is the empty string then the default predefined pool for addressSpace will be used, otherwise pool must be a valid IP address and length in CIDR notation.
// If subPool is not empty, it must be a valid IP address and length in CIDR notation which is a sub-range of pool.
// subPool must be empty if pool is empty.
func (a *Allocator) RequestPool(addressSpace, pool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) {
logrus.Debugf("RequestPool(%s, %s, %s, %v, %t)", addressSpace, pool, subPool, options, v6)
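
A hedged usage sketch of the contract the new comment spells out; this
is a fragment, assuming an already-constructed *ipam.Allocator, and the
"LocalDefault" space and CIDR values are illustrative:

```go
import (
	"fmt"

	"github.com/docker/libnetwork/ipam"
)

// carveSubPool reserves a /24 inside an explicit /16 parent pool, then
// asks for a pool from the default predefined range.
func carveSubPool(a *ipam.Allocator) error {
	poolID, subnet, _, err := a.RequestPool("LocalDefault", "10.10.0.0/16", "10.10.1.0/24", nil, false)
	if err != nil {
		return err // e.g. subPool set while pool is empty, or subPool outside pool
	}
	fmt.Printf("allocated %s (%s)\n", subnet, poolID)

	// With pool == "", the default predefined pool for the address space
	// is used; subPool must then be empty as well.
	_, subnet, _, err = a.RequestPool("LocalDefault", "", "", nil, false)
	if err != nil {
		return err
	}
	fmt.Printf("default pool gave %s\n", subnet)
	return nil
}
```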
@ -283,8 +287,8 @@ retry:
return remove()
}
// Given the address space, returns the local or global PoolConfig based on the
// address space is local or global. AddressSpace locality is being registered with IPAM out of band.
// Given the address space, returns the local or global PoolConfig based on whether the
// address space is local or global. AddressSpace locality is registered with IPAM out of band.
func (a *Allocator) getAddrSpace(as string) (*addrSpace, error) {
a.Lock()
defer a.Unlock()
@ -295,6 +299,8 @@ func (a *Allocator) getAddrSpace(as string) (*addrSpace, error) {
return aSpace, nil
}
// parsePoolRequest parses and validates a request to create a new pool under addressSpace and returns
// a SubnetKey, network and range describing the request.
func (a *Allocator) parsePoolRequest(addressSpace, pool, subPool string, v6 bool) (*SubnetKey, *net.IPNet, *AddressRange, error) {
var (
nw *net.IPNet


@ -257,6 +257,7 @@ func (aSpace *addrSpace) New() datastore.KVObject {
}
}
// updatePoolDBOnAdd returns a closure which will add the subnet k to the address space when executed.
func (aSpace *addrSpace) updatePoolDBOnAdd(k SubnetKey, nw *net.IPNet, ipr *AddressRange, pdf bool) (func() error, error) {
aSpace.Lock()
defer aSpace.Unlock()
@ -281,7 +282,7 @@ func (aSpace *addrSpace) updatePoolDBOnAdd(k SubnetKey, nw *net.IPNet, ipr *Addr
return func() error { return aSpace.alloc.insertBitMask(k, nw) }, nil
}
// This is a new non-master pool
// This is a new non-master pool (subPool)
p := &PoolData{
ParentKey: SubnetKey{AddressSpace: k.AddressSpace, Subnet: k.Subnet},
Pool: nw,


@ -1390,7 +1390,7 @@ func (n *network) addSvcRecords(eID, name, serviceID string, epIP, epIPv6 net.IP
return
}
logrus.Debugf("%s (%s).addSvcRecords(%s, %s, %s, %t) %s sid:%s", eID, n.ID()[0:7], name, epIP, epIPv6, ipMapUpdate, method, serviceID)
logrus.Debugf("%s (%.7s).addSvcRecords(%s, %s, %s, %t) %s sid:%s", eID, n.ID(), name, epIP, epIPv6, ipMapUpdate, method, serviceID)
c := n.getController()
c.Lock()
@ -1426,7 +1426,7 @@ func (n *network) deleteSvcRecords(eID, name, serviceID string, epIP net.IP, epI
return
}
logrus.Debugf("%s (%s).deleteSvcRecords(%s, %s, %s, %t) %s sid:%s ", eID, n.ID()[0:7], name, epIP, epIPv6, ipMapUpdate, method, serviceID)
logrus.Debugf("%s (%.7s).deleteSvcRecords(%s, %s, %s, %t) %s sid:%s ", eID, n.ID(), name, epIP, epIPv6, ipMapUpdate, method, serviceID)
c := n.getController()
c.Lock()
@ -2125,7 +2125,8 @@ func (n *network) lbEndpointName() string {
func (n *network) createLoadBalancerSandbox() (retErr error) {
sandboxName := n.lbSandboxName()
sbOptions := []SandboxOption{}
// Mark the sandbox to be a load balancer
sbOptions := []SandboxOption{OptionLoadBalancer()}
if n.ingress {
sbOptions = append(sbOptions, OptionIngress())
}


@ -110,7 +110,6 @@ type tableEventMessage struct {
tname string
key string
msg []byte
node string
}
func (m *tableEventMessage) Invalidates(other memberlist.Broadcast) bool {
@ -168,7 +167,6 @@ func (nDB *NetworkDB) sendTableEvent(event TableEvent_Type, nid string, tname st
id: nid,
tname: tname,
key: key,
node: nDB.config.NodeID,
})
return nil
}


@ -24,6 +24,9 @@ const (
retryInterval = 1 * time.Second
nodeReapInterval = 24 * time.Hour
nodeReapPeriod = 2 * time.Hour
// considering a cluster with > 20 nodes and a drain speed of 100 msg/s
// the following is roughly 1 minute
maxQueueLenBroadcastOnSync = 500
)
type logWriter struct{}
@ -52,7 +55,7 @@ func (l *logWriter) Write(p []byte) (int, error) {
// SetKey adds a new key to the key ring
func (nDB *NetworkDB) SetKey(key []byte) {
logrus.Debugf("Adding key %s", hex.EncodeToString(key)[0:5])
logrus.Debugf("Adding key %.5s", hex.EncodeToString(key))
nDB.Lock()
defer nDB.Unlock()
for _, dbKey := range nDB.config.Keys {
@ -69,7 +72,7 @@ func (nDB *NetworkDB) SetKey(key []byte) {
// SetPrimaryKey sets the given key as the primary key. This should have
been added a priori through SetKey
func (nDB *NetworkDB) SetPrimaryKey(key []byte) {
logrus.Debugf("Primary Key %s", hex.EncodeToString(key)[0:5])
logrus.Debugf("Primary Key %.5s", hex.EncodeToString(key))
nDB.RLock()
defer nDB.RUnlock()
for _, dbKey := range nDB.config.Keys {
@ -85,7 +88,7 @@ func (nDB *NetworkDB) SetPrimaryKey(key []byte) {
// RemoveKey removes a key from the key ring. The key being removed
// can't be the primary key
func (nDB *NetworkDB) RemoveKey(key []byte) {
logrus.Debugf("Remove Key %s", hex.EncodeToString(key)[0:5])
logrus.Debugf("Remove Key %.5s", hex.EncodeToString(key))
nDB.Lock()
defer nDB.Unlock()
for i, dbKey := range nDB.config.Keys {
@ -123,7 +126,7 @@ func (nDB *NetworkDB) clusterInit() error {
var err error
if len(nDB.config.Keys) > 0 {
for i, key := range nDB.config.Keys {
logrus.Debugf("Encryption key %d: %s", i+1, hex.EncodeToString(key)[0:5])
logrus.Debugf("Encryption key %d: %.5s", i+1, hex.EncodeToString(key))
}
nDB.keyring, err = memberlist.NewKeyring(nDB.config.Keys, nDB.config.Keys[0])
if err != nil {
@ -285,18 +288,35 @@ func (nDB *NetworkDB) rejoinClusterBootStrap() {
return
}
myself, _ := nDB.nodes[nDB.config.NodeID]
bootStrapIPs := make([]string, 0, len(nDB.bootStrapIP))
for _, bootIP := range nDB.bootStrapIP {
for _, node := range nDB.nodes {
if node.Addr.Equal(bootIP) {
// One of the bootstrap nodes is part of the cluster, return
nDB.RUnlock()
return
}
// bootstrap IPs are usually IP:port strings from the Join
var bootstrapIP net.IP
ipStr, _, err := net.SplitHostPort(bootIP)
if err != nil {
// no port in the address; try to parse it as a plain IP
// Note: this seems to be the case for swarm nodes that do not specify any port
ipStr = bootIP
}
bootstrapIP = net.ParseIP(ipStr)
if bootstrapIP != nil {
for _, node := range nDB.nodes {
if node.Addr.Equal(bootstrapIP) && !node.Addr.Equal(myself.Addr) {
// One of the bootstrap nodes (and not myself) is part of the cluster, return
nDB.RUnlock()
return
}
}
bootStrapIPs = append(bootStrapIPs, bootIP)
}
bootStrapIPs = append(bootStrapIPs, bootIP.String())
}
nDB.RUnlock()
if len(bootStrapIPs) == 0 {
// this also avoids calling Join with an empty list, which would erase the current bootstrap IP list
logrus.Debug("rejoinClusterBootStrap did not find any valid IP")
return
}
// None of the bootStrap nodes are in the cluster, call memberlist join
logrus.Debugf("rejoinClusterBootStrap, calling cluster join with bootStrap %v", bootStrapIPs)
ctx, cancel := context.WithTimeout(nDB.ctx, rejoinClusterDuration)
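
The bootstrap-address handling above follows a standard host:port
fallback; a self-contained sketch of the same pattern (the sample inputs
are illustrative):

```go
package main

import (
	"fmt"
	"net"
)

// parseBootstrapIP extracts the IP from either a "host:port" or a bare
// IP string, mirroring the fallback above; it returns nil when neither
// form parses.
func parseBootstrapIP(boot string) net.IP {
	ipStr, _, err := net.SplitHostPort(boot)
	if err != nil {
		// no port present (e.g. swarm members given as bare IPs)
		ipStr = boot
	}
	return net.ParseIP(ipStr)
}

func main() {
	fmt.Println(parseBootstrapIP("192.168.0.10:7946")) // 192.168.0.10
	fmt.Println(parseBootstrapIP("192.168.0.10"))      // 192.168.0.10
	fmt.Println(parseBootstrapIP("not-an-ip:x"))       // <nil>
}
```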
@ -555,6 +575,7 @@ func (nDB *NetworkDB) bulkSync(nodes []string, all bool) ([]string, error) {
var err error
var networks []string
var success bool
for _, node := range nodes {
if node == nDB.config.NodeID {
continue
@ -562,21 +583,25 @@ func (nDB *NetworkDB) bulkSync(nodes []string, all bool) ([]string, error) {
logrus.Debugf("%v(%v): Initiating bulk sync with node %v", nDB.config.Hostname, nDB.config.NodeID, node)
networks = nDB.findCommonNetworks(node)
err = nDB.bulkSyncNode(networks, node, true)
// if it's a periodic bulk sync, stop after the first successful sync
if !all && err == nil {
break
}
if err != nil {
err = fmt.Errorf("bulk sync to node %s failed: %v", node, err)
logrus.Warn(err.Error())
} else {
// bulk sync succeeded
success = true
// if it's a periodic bulk sync, stop after the first successful sync
if !all {
break
}
}
}
if err != nil {
return nil, err
if success {
// if at least one node sync succeeded
return networks, nil
}
return networks, nil
return nil, err
}
// Bulk sync all the table entries belonging to a set of networks to a


@ -142,7 +142,7 @@ func (nDB *NetworkDB) handleNetworkEvent(nEvent *NetworkEvent) bool {
return true
}
func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent) bool {
func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent, isBulkSync bool) bool {
// Update our local clock if the received messages has newer time.
nDB.tableClock.Witness(tEvent.LTime)
@ -175,6 +175,14 @@ func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent) bool {
nDB.Unlock()
return false
}
} else if tEvent.Type == TableEventTypeDelete && !isBulkSync {
nDB.Unlock()
// We don't know this entry; it is being deleted, and the message arrived asynchronously.
// In this case the safest approach is to ignore it: it is possible that the queue grew so much that
// the message outlived the garbage collection time (the residual reap time carried in the message is
// not updated, to avoid inserting too many messages in the queue).
// Messages coming from a TCP bulk sync, by contrast, are safe because they carry the latest value
// for the garbage collection time.
return false
}
e = &entry{
@ -197,11 +205,17 @@ func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent) bool {
nDB.Unlock()
if err != nil && tEvent.Type == TableEventTypeDelete {
// If it is a delete event and we did not have a state for it, don't propagate to the application
// Again we don't know the entry but this is coming from a TCP sync so the message body is up to date.
// We had saved the state in order to speed up convergence and to be able to avoid accepting create events.
// Now we will rebroadcast the message if 2 conditions are met:
// 1) we had already synced this network (during the network join)
// 2) the residual reapTime is higher than 1/6 of the total reapTime.
// If the residual reapTime is lower or equal to 1/6 of the total reapTime don't bother broadcasting it around
// most likely the cluster is already aware of it; if not, whoever syncs with this node will catch the state too.
// This also avoids that deletions of entries close to their garbage collection end up circling around forever
return e.reapTime > nDB.config.reapEntryInterval/6
// most likely the cluster is already aware of it
// This also reduces the possibility that deletions of entries close to their garbage collection end up
// circling around forever
//logrus.Infof("exiting on delete not knowing the obj with rebroadcast:%t", network.inSync)
return network.inSync && e.reapTime > nDB.config.reapEntryInterval/6
}
var op opType
@ -215,7 +229,7 @@ func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent) bool {
}
nDB.broadcaster.Write(makeEvent(op, tEvent.TableName, tEvent.NetworkID, tEvent.Key, tEvent.Value))
return true
return network.inSync
}
func (nDB *NetworkDB) handleCompound(buf []byte, isBulkSync bool) {
@ -244,7 +258,7 @@ func (nDB *NetworkDB) handleTableMessage(buf []byte, isBulkSync bool) {
return
}
if rebroadcast := nDB.handleTableEvent(&tEvent); rebroadcast {
if rebroadcast := nDB.handleTableEvent(&tEvent, isBulkSync); rebroadcast {
var err error
buf, err = encodeRawMessage(MessageTypeTableEvent, buf)
if err != nil {
@ -261,12 +275,16 @@ func (nDB *NetworkDB) handleTableMessage(buf []byte, isBulkSync bool) {
return
}
// if the queue is over the threshold, avoid distributing information coming from TCP sync
if isBulkSync && n.tableBroadcasts.NumQueued() > maxQueueLenBroadcastOnSync {
return
}
n.tableBroadcasts.QueueBroadcast(&tableEventMessage{
msg: buf,
id: tEvent.NetworkID,
tname: tEvent.TableName,
key: tEvent.Key,
node: tEvent.NodeName,
})
}
}


@ -5,7 +5,6 @@ package networkdb
import (
"context"
"fmt"
"net"
"os"
"strings"
"sync"
@ -96,7 +95,7 @@ type NetworkDB struct {
// bootStrapIP is the list of IPs that can be used to bootstrap
// the gossip.
bootStrapIP []net.IP
bootStrapIP []string
// lastStatsTimestamp is the last timestamp when the stats got printed
lastStatsTimestamp time.Time
@ -131,6 +130,9 @@ type network struct {
// Lamport time for the latest state of the entry.
ltime serf.LamportTime
// Gets set to true after the first bulk sync happens
inSync bool
// Node leave is in progress.
leaving bool
@ -268,10 +270,8 @@ func New(c *Config) (*NetworkDB, error) {
// instances passed by the caller in the form of addr:port
func (nDB *NetworkDB) Join(members []string) error {
nDB.Lock()
nDB.bootStrapIP = make([]net.IP, 0, len(members))
for _, m := range members {
nDB.bootStrapIP = append(nDB.bootStrapIP, net.ParseIP(m))
}
nDB.bootStrapIP = append([]string(nil), members...)
logrus.Infof("The new bootstrap node list is:%v", nDB.bootStrapIP)
nDB.Unlock()
return nDB.clusterJoin(members)
}
@ -619,6 +619,7 @@ func (nDB *NetworkDB) JoinNetwork(nid string) error {
}
nDB.addNetworkNode(nid, nDB.config.NodeID)
networkNodes := nDB.networkNodes[nid]
n = nodeNetworks[nid]
nDB.Unlock()
if err := nDB.sendNetworkEvent(nid, NetworkEventTypeJoin, ltime); err != nil {
@ -630,6 +631,12 @@ func (nDB *NetworkDB) JoinNetwork(nid string) error {
logrus.Errorf("Error bulk syncing while joining network %s: %v", nid, err)
}
// Mark the network as being synced
// note this is a best effort, we are not checking the result of the bulk sync
nDB.Lock()
n.inSync = true
nDB.Unlock()
return nil
}


@ -28,6 +28,7 @@ var NetDbPaths2Func = map[string]diagnostic.HTTPHandlerFunc{
"/deleteentry": dbDeleteEntry,
"/getentry": dbGetEntry,
"/gettable": dbGetTable,
"/networkstats": dbNetworkStats,
}
func dbJoin(ctx interface{}, w http.ResponseWriter, r *http.Request) {
@ -411,3 +412,41 @@ func dbGetTable(ctx interface{}, w http.ResponseWriter, r *http.Request) {
}
diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
}
func dbNetworkStats(ctx interface{}, w http.ResponseWriter, r *http.Request) {
r.ParseForm()
diagnostic.DebugHTTPForm(r)
_, json := diagnostic.ParseHTTPFormOptions(r)
// audit logs
log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
log.Info("network stats")
if len(r.Form["nid"]) < 1 {
rsp := diagnostic.WrongCommand(missingParameter, fmt.Sprintf("%s?nid=test", r.URL.Path))
log.Error("network stats failed, wrong input")
diagnostic.HTTPReply(w, rsp, json)
return
}
nDB, ok := ctx.(*NetworkDB)
if ok {
nDB.RLock()
networks := nDB.networks[nDB.config.NodeID]
network, ok := networks[r.Form["nid"][0]]
entries := -1
qLen := -1
if ok {
entries = network.entriesNumber
qLen = network.tableBroadcasts.NumQueued()
}
nDB.RUnlock()
rsp := diagnostic.CommandSucceed(&diagnostic.NetworkStatsResult{Entries: entries, QueueLen: qLen})
log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("network stats done")
diagnostic.HTTPReply(w, rsp, json)
return
}
diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
}
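
A hedged sketch of querying the new endpoint, assuming the diagnostic
server is enabled and reachable at 127.0.0.1:2000 (the address and the
sample network ID are assumptions):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	nid := "5f1686183f1c" // assumption: a network ID known to this node

	// The handler replies with -1 values when the network is not present
	// on this node.
	resp, err := http.Get("http://127.0.0.1:2000/networkstats?nid=" + nid)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // e.g. "entries: 10, qlen: 0"
}
```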


@ -0,0 +1,16 @@
package kernel
type conditionalCheck func(val1, val2 string) bool
// OSValue represents a tuple: the desired value and a check function that decides when to apply it
type OSValue struct {
Value string
CheckFn conditionalCheck
}
func propertyIsValid(val1, val2 string, check conditionalCheck) bool {
if check == nil || check(val1, val2) {
return true
}
return false
}


@ -0,0 +1,47 @@
package kernel
import (
"io/ioutil"
"path"
"strings"
"github.com/sirupsen/logrus"
)
// writeSystemProperty writes the value to a path under /proc/sys as determined from the key.
// e.g. net.ipv4.ip_forward translates to /proc/sys/net/ipv4/ip_forward.
func writeSystemProperty(key, value string) error {
keyPath := strings.Replace(key, ".", "/", -1)
return ioutil.WriteFile(path.Join("/proc/sys", keyPath), []byte(value), 0644)
}
// readSystemProperty reads the value from the path under /proc/sys and returns it
func readSystemProperty(key string) (string, error) {
keyPath := strings.Replace(key, ".", "/", -1)
value, err := ioutil.ReadFile(path.Join("/proc/sys", keyPath))
if err != nil {
return "", err
}
return strings.TrimSpace(string(value)), nil
}
// ApplyOSTweaks applies the configuration values passed as arguments
func ApplyOSTweaks(osConfig map[string]*OSValue) {
for k, v := range osConfig {
// read the existing property from disk
oldv, err := readSystemProperty(k)
if err != nil {
logrus.WithError(err).Errorf("error reading the kernel parameter %s", k)
continue
}
if propertyIsValid(oldv, v.Value, v.CheckFn) {
// write new prop value to disk
if err := writeSystemProperty(k, v.Value); err != nil {
logrus.WithError(err).Errorf("error setting the kernel parameter %s = %s, (leaving as %s)", k, v.Value, oldv)
continue
}
logrus.Debugf("updated kernel parameter %s = %s (was %s)", k, v.Value, oldv)
}
}
}
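
The overlay driver's gc_thresh map above shows the intended call
pattern; here is a hedged standalone sketch (the knob and threshold
value are illustrative):

```go
package main

import (
	"strconv"

	"github.com/docker/libnetwork/osl/kernel"
)

// checkHigher accepts the new value only when it raises the current
// one, mirroring the overlay driver's conditional check.
func checkHigher(cur, want string) bool {
	curInt, _ := strconv.ParseInt(cur, 10, 32)
	wantInt, _ := strconv.ParseInt(want, 10, 32)
	return curInt < wantInt
}

func main() {
	// Each entry is written to /proc/sys only if its CheckFn (when
	// non-nil) approves the change; failures are logged and skipped.
	kernel.ApplyOSTweaks(map[string]*kernel.OSValue{
		"net.ipv4.neigh.default.gc_thresh1": {Value: "8192", CheckFn: checkHigher},
	})
}
```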


@ -0,0 +1,7 @@
// +build !linux
package kernel
// ApplyOSTweaks applies the configuration values passed as arguments
func ApplyOSTweaks(osConfig map[string]*OSValue) {
}


@ -16,6 +16,7 @@ import (
"github.com/docker/docker/pkg/reexec"
"github.com/docker/libnetwork/ns"
"github.com/docker/libnetwork/osl/kernel"
"github.com/docker/libnetwork/types"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
@ -29,13 +30,18 @@ func init() {
}
var (
once sync.Once
garbagePathMap = make(map[string]bool)
gpmLock sync.Mutex
gpmWg sync.WaitGroup
gpmCleanupPeriod = 60 * time.Second
gpmChan = make(chan chan struct{})
prefix = defaultPrefix
once sync.Once
garbagePathMap = make(map[string]bool)
gpmLock sync.Mutex
gpmWg sync.WaitGroup
gpmCleanupPeriod = 60 * time.Second
gpmChan = make(chan chan struct{})
prefix = defaultPrefix
loadBalancerConfig = map[string]*kernel.OSValue{
// expire connections from the IPVS connection table when the backend is not available
// more info: https://github.com/torvalds/linux/blob/master/Documentation/networking/ipvs-sysctl.txt#L126:1
"net.ipv4.vs.expire_nodest_conn": {"1", nil},
}
)
// The networkNamespace type is the linux implementation of the Sandbox
@ -630,3 +636,13 @@ func setIPv6(path, iface string, enable bool) error {
}
return nil
}
// ApplyOSTweaks applies linux configs on the sandbox
func (n *networkNamespace) ApplyOSTweaks(types []SandboxType) {
for _, t := range types {
switch t {
case SandboxTypeLoadBalancer:
kernel.ApplyOSTweaks(loadBalancerConfig)
}
}
}


@ -7,6 +7,16 @@ import (
"github.com/docker/libnetwork/types"
)
// SandboxType specifies the type of the sandbox; it can be used to apply special configs
type SandboxType int
const (
// SandboxTypeIngress indicates that the sandbox is for the ingress
SandboxTypeIngress = iota
// SandboxTypeLoadBalancer indicates that the sandbox is a load balancer
SandboxTypeLoadBalancer = iota
)
// Sandbox represents a network sandbox, identified by a specific key. It
// holds a list of Interfaces, routes etc, and more can be added dynamically.
type Sandbox interface {
@ -70,6 +80,9 @@ type Sandbox interface {
// restore sandbox
Restore(ifsopt map[string][]IfaceOption, routes []*types.StaticRoute, gw net.IP, gw6 net.IP) error
// ApplyOSTweaks applies operating system specific knobs on the sandbox
ApplyOSTweaks([]SandboxType)
}
// NeighborOptionSetter interface defines the option setter methods for interface options


@ -83,6 +83,7 @@ type sandbox struct {
inDelete bool
ingress bool
ndotsSet bool
oslTypes []osl.SandboxType // slice of properties of this sandbox
sync.Mutex
// This mutex is used to serialize service related operation for an endpoint
// The lock is here because the endpoint is saved into the store so is not unique
@ -1162,6 +1163,15 @@ func OptionPortMapping(portBindings []types.PortBinding) SandboxOption {
func OptionIngress() SandboxOption {
return func(sb *sandbox) {
sb.ingress = true
sb.oslTypes = append(sb.oslTypes, osl.SandboxTypeIngress)
}
}
// OptionLoadBalancer function returns an option setter for marking a
// sandbox as a load balancer sandbox.
func OptionLoadBalancer() SandboxOption {
return func(sb *sandbox) {
sb.oslTypes = append(sb.oslTypes, osl.SandboxTypeLoadBalancer)
}
}
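
Tying this to the controller hunk above: NewSandbox applies the options
and then calls ApplyOSTweaks on the OS sandbox, so a load balancer
sandbox picks up the IPVS knob automatically. A hedged caller fragment
(the helper name and container ID are assumptions):

```go
import "github.com/docker/libnetwork"

// newLBSandbox is a hypothetical helper; c is a controller obtained
// elsewhere via libnetwork.New.
func newLBSandbox(c libnetwork.NetworkController, cid string) (libnetwork.Sandbox, error) {
	// OptionLoadBalancer appends SandboxTypeLoadBalancer to sb.oslTypes;
	// NewSandbox then calls ApplyOSTweaks on the OS sandbox, which sets
	// net.ipv4.vs.expire_nodest_conn=1 for load balancer sandboxes.
	return c.NewSandbox(cid, libnetwork.OptionLoadBalancer())
}
```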


@ -369,11 +369,13 @@ dnsOpt:
return fmt.Errorf("invalid ndots option %v", option)
}
if num, err := strconv.Atoi(parts[1]); err != nil {
return fmt.Errorf("invalid number for ndots option %v", option)
} else if num > 0 {
return fmt.Errorf("invalid number for ndots option: %v", parts[1])
} else if num >= 0 {
// if the user sets ndots, use the user setting
sb.ndotsSet = true
break dnsOpt
} else {
return fmt.Errorf("invalid number for ndots option: %v", num)
}
}
}
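
The net effect of the two ndots fixes (cfa9afdb, 6b4c4af7): zero and
positive values are honored, while negative and non-numeric values are
rejected. A standalone sketch with a hypothetical helper mirroring the
logic above (not part of libnetwork):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// validateNdots mirrors the validation above; illustrative only.
func validateNdots(option string) error {
	parts := strings.SplitN(option, ":", 2)
	if len(parts) != 2 {
		return fmt.Errorf("invalid ndots option %v", option)
	}
	num, err := strconv.Atoi(parts[1])
	if err != nil {
		return fmt.Errorf("invalid number for ndots option: %v", parts[1])
	}
	if num < 0 {
		return fmt.Errorf("invalid number for ndots option: %v", num)
	}
	return nil // num >= 0 is accepted, including the previously ignored "ndots:0"
}

func main() {
	for _, opt := range []string{"ndots:0", "ndots:2", "ndots:-1", "ndots:x"} {
		fmt.Printf("%-9s -> %v\n", opt, validateNdots(opt))
	}
}
```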


@ -244,7 +244,7 @@ func (c *controller) sandboxCleanup(activeSandboxes map[string]interface{}) {
}
sb.osSbox, err = osl.NewSandbox(sb.Key(), create, isRestore)
if err != nil {
logrus.Errorf("failed to create osl sandbox while trying to restore sandbox %s%s: %v", sb.ID()[0:7], msg, err)
logrus.Errorf("failed to create osl sandbox while trying to restore sandbox %.7s%s: %v", sb.ID(), msg, err)
continue
}


@ -43,7 +43,7 @@ func (sb *sandbox) populateLoadBalancers(ep *endpoint) {
if n.ingress {
if err := addRedirectRules(sb.Key(), eIP, ep.ingressPorts); err != nil {
logrus.Errorf("Failed to add redirect rules for ep %s (%s): %v", ep.Name(), ep.ID()[0:7], err)
logrus.Errorf("Failed to add redirect rules for ep %s (%.7s): %v", ep.Name(), ep.ID(), err)
}
}
}
@ -106,7 +106,7 @@ func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) {
i, err := ipvs.New(sb.Key())
if err != nil {
logrus.Errorf("Failed to create an ipvs handle for sbox %s (%s,%s) for lb addition: %v", sb.ID()[0:7], sb.ContainerID()[0:7], sb.Key(), err)
logrus.Errorf("Failed to create an ipvs handle for sbox %.7s (%.7s,%s) for lb addition: %v", sb.ID(), sb.ContainerID(), sb.Key(), err)
return
}
defer i.Close()
@ -142,14 +142,14 @@ func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) {
}
}
logrus.Debugf("Creating service for vip %s fwMark %d ingressPorts %#v in sbox %s (%s)", lb.vip, lb.fwMark, lb.service.ingressPorts, sb.ID()[0:7], sb.ContainerID()[0:7])
logrus.Debugf("Creating service for vip %s fwMark %d ingressPorts %#v in sbox %.7s (%.7s)", lb.vip, lb.fwMark, lb.service.ingressPorts, sb.ID(), sb.ContainerID())
if err := invokeFWMarker(sb.Key(), lb.vip, lb.fwMark, lb.service.ingressPorts, eIP, false); err != nil {
logrus.Errorf("Failed to add firewall mark rule in sbox %s (%s): %v", sb.ID()[0:7], sb.ContainerID()[0:7], err)
logrus.Errorf("Failed to add firewall mark rule in sbox %.7s (%.7s): %v", sb.ID(), sb.ContainerID(), err)
return
}
if err := i.NewService(s); err != nil && err != syscall.EEXIST {
logrus.Errorf("Failed to create a new service for vip %s fwmark %d in sbox %s (%s): %v", lb.vip, lb.fwMark, sb.ID()[0:7], sb.ContainerID()[0:7], err)
logrus.Errorf("Failed to create a new service for vip %s fwmark %d in sbox %.7s (%.7s): %v", lb.vip, lb.fwMark, sb.ID(), sb.ContainerID(), err)
return
}
}
@ -164,7 +164,7 @@ func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) {
// destination.
s.SchedName = ""
if err := i.NewDestination(s, d); err != nil && err != syscall.EEXIST {
logrus.Errorf("Failed to create real server %s for vip %s fwmark %d in sbox %s (%s): %v", ip, lb.vip, lb.fwMark, sb.ID()[0:7], sb.ContainerID()[0:7], err)
logrus.Errorf("Failed to create real server %s for vip %s fwmark %d in sbox %.7s (%.7s): %v", ip, lb.vip, lb.fwMark, sb.ID(), sb.ContainerID(), err)
}
}
@ -189,7 +189,7 @@ func (n *network) rmLBBackend(ip net.IP, lb *loadBalancer, rmService bool, fullR
i, err := ipvs.New(sb.Key())
if err != nil {
logrus.Errorf("Failed to create an ipvs handle for sbox %s (%s,%s) for lb removal: %v", sb.ID()[0:7], sb.ContainerID()[0:7], sb.Key(), err)
logrus.Errorf("Failed to create an ipvs handle for sbox %.7s (%.7s,%s) for lb removal: %v", sb.ID(), sb.ContainerID(), sb.Key(), err)
return
}
defer i.Close()
@ -207,19 +207,19 @@ func (n *network) rmLBBackend(ip net.IP, lb *loadBalancer, rmService bool, fullR
if fullRemove {
if err := i.DelDestination(s, d); err != nil && err != syscall.ENOENT {
logrus.Errorf("Failed to delete real server %s for vip %s fwmark %d in sbox %s (%s): %v", ip, lb.vip, lb.fwMark, sb.ID()[0:7], sb.ContainerID()[0:7], err)
logrus.Errorf("Failed to delete real server %s for vip %s fwmark %d in sbox %.7s (%.7s): %v", ip, lb.vip, lb.fwMark, sb.ID(), sb.ContainerID(), err)
}
} else {
d.Weight = 0
if err := i.UpdateDestination(s, d); err != nil && err != syscall.ENOENT {
logrus.Errorf("Failed to set LB weight of real server %s to 0 for vip %s fwmark %d in sbox %s (%s): %v", ip, lb.vip, lb.fwMark, sb.ID()[0:7], sb.ContainerID()[0:7], err)
logrus.Errorf("Failed to set LB weight of real server %s to 0 for vip %s fwmark %d in sbox %.7s (%.7s): %v", ip, lb.vip, lb.fwMark, sb.ID(), sb.ContainerID(), err)
}
}
if rmService {
s.SchedName = ipvs.RoundRobin
if err := i.DelService(s); err != nil && err != syscall.ENOENT {
logrus.Errorf("Failed to delete service for vip %s fwmark %d in sbox %s (%s): %v", lb.vip, lb.fwMark, sb.ID()[0:7], sb.ContainerID()[0:7], err)
logrus.Errorf("Failed to delete service for vip %s fwmark %d in sbox %.7s (%.7s): %v", lb.vip, lb.fwMark, sb.ID(), sb.ContainerID(), err)
}
if sb.ingress {
@ -234,7 +234,7 @@ func (n *network) rmLBBackend(ip net.IP, lb *loadBalancer, rmService bool, fullR
}
if err := invokeFWMarker(sb.Key(), lb.vip, lb.fwMark, lb.service.ingressPorts, eIP, true); err != nil {
logrus.Errorf("Failed to delete firewall mark rule in sbox %s (%s): %v", sb.ID()[0:7], sb.ContainerID()[0:7], err)
logrus.Errorf("Failed to delete firewall mark rule in sbox %.7s (%.7s): %v", sb.ID(), sb.ContainerID(), err)
}
// Remove IP alias from the VIP to the endpoint


@ -332,6 +332,8 @@ func CompareIPNet(a, b *net.IPNet) bool {
}
// GetMinimalIP returns the address in its shortest form
// If ip contains an IPv4-mapped IPv6 address, the 4-octet form of the IPv4 address will be returned.
// Otherwise ip is returned unchanged.
func GetMinimalIP(ip net.IP) net.IP {
if ip != nil && ip.To4() != nil {
return ip.To4()
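
A sketch of the documented behavior (the sample addresses are
illustrative; GetMinimalIP lives in the types package shown here):

```go
package main

import (
	"fmt"
	"net"

	"github.com/docker/libnetwork/types"
)

func main() {
	// net.ParseIP returns the 16-byte form for IPv4-mapped addresses.
	v4mapped := net.ParseIP("::ffff:192.168.0.1")
	fmt.Println(len(v4mapped), len(types.GetMinimalIP(v4mapped))) // 16 4

	// Plain IPv6 addresses are returned unchanged.
	v6 := net.ParseIP("2001:db8::1")
	fmt.Println(len(types.GetMinimalIP(v6))) // 16
}
```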


@ -43,7 +43,7 @@ github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd
github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
github.com/sirupsen/logrus v1.0.3
github.com/stretchr/testify dab07ac62d4905d3e48d17dc549c684ac3b7c15a
github.com/stretchr/testify v1.2.2
github.com/syndtr/gocapability 33e07d32887e1e06b7c025f27ce52f62c7990bc0
github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e
@ -55,8 +55,8 @@ golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff78260e27b9ab7c73
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/davecgh/go-spew v1.1.0
github.com/pmezard/go-difflib v1.0.0
github.com/cyphar/filepath-securejoin v0.2.1
github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
github.com/hashicorp/go-immutable-radix 7f3cd4390caab3250a57f30efdb2a65dd7649ecf