Libnetwork vendoring

- removed support for Solaris
- networkdb: fixed a race on node management

Signed-off-by: Flavio Crisciani <flavio.crisciani@docker.com>
Flavio Crisciani 2017-11-27 14:57:45 -08:00
parent c75c45b188
commit 2e5d5c1d32
36 changed files with 188 additions and 5344 deletions
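The networkdb diff itself is suppressed further down because of its size, so the race fix is only described by the bullet above. As a rough sketch of the class of fix involved (all names here are invented for illustration, not libnetwork's actual change): node-management state shared between the gossip loop and the event handlers must only be touched while holding the database lock.

package sketch

import "sync"

// nodeDB is a hypothetical stand-in for networkdb's node bookkeeping.
type nodeDB struct {
	sync.Mutex
	nodes       map[string]struct{} // active cluster members
	failedNodes map[string]struct{} // members that missed heartbeats
}

// moveToFailed can safely race with other node-management calls because
// every access to the shared maps happens under the lock.
func (db *nodeDB) moveToFailed(name string) {
	db.Lock()
	defer db.Unlock()
	delete(db.nodes, name)
	db.failedNodes[name] = struct{}{}
}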


@@ -30,7 +30,7 @@ github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8
github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
#get libnetwork packages
-github.com/docker/libnetwork 72fd7e5495eba86e28012e39b5ed63ef9ca9a97b
+github.com/docker/libnetwork f7d21337cf1eb628ad54eecac0881fa23ec266df
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec


@@ -1,32 +0,0 @@
package libnetwork
import (
"fmt"
"strconv"
"github.com/docker/libnetwork/drivers/solaris/bridge"
)
const libnGWNetwork = "docker_gwbridge"
func getPlatformOption() EndpointOption {
return nil
}
func (c *controller) createGWNetwork() (Network, error) {
netOption := map[string]string{
bridge.BridgeName: libnGWNetwork,
bridge.EnableICC: strconv.FormatBool(false),
bridge.EnableIPMasquerade: strconv.FormatBool(true),
}
n, err := c.NewNetwork("bridge", libnGWNetwork, "",
NetworkOptionDriverOpts(netOption),
NetworkOptionEnableIPv6(false),
)
if err != nil {
return nil, fmt.Errorf("error creating external connectivity network: %v", err)
}
return n, err
}


@@ -81,7 +81,7 @@ func (n *Server) EnableDebug(ip string, port int) {
// go func() {
// http.Serve(n.sk, n.mux)
// }()
http.ListenAndServe(":8000", n.mux)
http.ListenAndServe(fmt.Sprintf(":%d", port), n.mux)
}
// DisableDebug stops the debug and closes the tcp socket
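The change above threads the caller-supplied port through to the listener instead of the hardcoded ":8000". A minimal standalone illustration of the corrected behavior (the Server type here is a stand-in, not libnetwork's full diagnostic server):

package main

import (
	"fmt"
	"net/http"
)

// Server is a stand-in for libnetwork's debug server.
type Server struct {
	mux *http.ServeMux
}

// EnableDebug listens on the port the caller asked for, not a fixed one.
func (n *Server) EnableDebug(ip string, port int) {
	http.ListenAndServe(fmt.Sprintf(":%d", port), n.mux)
}

func main() {
	n := &Server{mux: http.NewServeMux()}
	n.EnableDebug("127.0.0.1", 9000) // now serves on :9000 instead of :8000
}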

File diff suppressed because it is too large


@@ -1,384 +0,0 @@
// +build solaris
package bridge
import (
"encoding/json"
"fmt"
"net"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/discoverapi"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/types"
"github.com/sirupsen/logrus"
)
const (
// network config prefix was not specific enough.
// To be backward compatible, need custom endpoint
// prefix with different root
bridgePrefix = "bridge"
bridgeEndpointPrefix = "bridge-endpoint"
)
func (d *driver) initStore(option map[string]interface{}) error {
if data, ok := option[netlabel.LocalKVClient]; ok {
var err error
dsc, ok := data.(discoverapi.DatastoreConfigData)
if !ok {
return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
}
d.store, err = datastore.NewDataStoreFromConfig(dsc)
if err != nil {
return types.InternalErrorf("bridge driver failed to initialize data store: %v", err)
}
err = d.populateNetworks()
if err != nil {
return err
}
err = d.populateEndpoints()
if err != nil {
return err
}
}
return nil
}
func (d *driver) populateNetworks() error {
kvol, err := d.store.List(datastore.Key(bridgePrefix), &networkConfiguration{})
if err != nil && err != datastore.ErrKeyNotFound {
return fmt.Errorf("failed to get bridge network configurations from store: %v", err)
}
// It's normal for network configuration state to be empty. Just return.
if err == datastore.ErrKeyNotFound {
return nil
}
for _, kvo := range kvol {
ncfg := kvo.(*networkConfiguration)
if err = d.createNetwork(ncfg); err != nil {
logrus.Warnf("could not create bridge network for id %s bridge name %s while booting up from persistent state: %v", ncfg.ID, ncfg.BridgeName, err)
}
logrus.Debugf("Network (%s) restored", ncfg.ID[0:7])
}
return nil
}
func (d *driver) populateEndpoints() error {
kvol, err := d.store.List(datastore.Key(bridgeEndpointPrefix), &bridgeEndpoint{})
if err != nil && err != datastore.ErrKeyNotFound {
return fmt.Errorf("failed to get bridge endpoints from store: %v", err)
}
if err == datastore.ErrKeyNotFound {
return nil
}
for _, kvo := range kvol {
ep := kvo.(*bridgeEndpoint)
n, ok := d.networks[ep.nid]
if !ok {
logrus.Debugf("Network (%s) not found for restored bridge endpoint (%s)", ep.nid[0:7], ep.id[0:7])
logrus.Debugf("Deleting stale bridge endpoint (%s) from store", ep.nid[0:7])
if err := d.storeDelete(ep); err != nil {
logrus.Debugf("Failed to delete stale bridge endpoint (%s) from store", ep.nid[0:7])
}
continue
}
n.endpoints[ep.id] = ep
n.restorePortAllocations(ep)
logrus.Debugf("Endpoint (%s) restored to network (%s)", ep.id[0:7], ep.nid[0:7])
}
return nil
}
func (d *driver) storeUpdate(kvObject datastore.KVObject) error {
if d.store == nil {
logrus.Warnf("bridge store not initialized. kv object %s is not added to the store", datastore.Key(kvObject.Key()...))
return nil
}
if err := d.store.PutObjectAtomic(kvObject); err != nil {
return fmt.Errorf("failed to update bridge store for object type %T: %v", kvObject, err)
}
return nil
}
func (d *driver) storeDelete(kvObject datastore.KVObject) error {
if d.store == nil {
logrus.Debugf("bridge store not initialized. kv object %s is not deleted from store", datastore.Key(kvObject.Key()...))
return nil
}
retry:
if err := d.store.DeleteObjectAtomic(kvObject); err != nil {
if err == datastore.ErrKeyModified {
if err := d.store.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil {
return fmt.Errorf("could not update the kvobject to latest when trying to delete: %v", err)
}
goto retry
}
return err
}
return nil
}
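storeDelete above is an optimistic-concurrency loop: when the atomic delete fails because another writer bumped the object's version, it re-reads the object and retries. The same compare-and-swap retry can be written without goto; a minimal sketch against a hypothetical store interface (not libnetwork's datastore API):

package sketch

import "errors"

// errKeyModified stands in for datastore.ErrKeyModified.
var errKeyModified = errors.New("key modified")

// store is a hypothetical subset of a versioned KV store.
type store interface {
	DeleteAtomic(key string, index uint64) error // fails with errKeyModified on a version conflict
	GetIndex(key string) (uint64, error)         // returns the latest version index
}

// deleteWithRetry keeps refreshing the version index until the atomic
// delete wins the compare-and-swap, mirroring storeDelete's retry label.
func deleteWithRetry(s store, key string, index uint64) error {
	for {
		err := s.DeleteAtomic(key, index)
		if err != errKeyModified {
			return err // nil on success, or a non-retryable failure
		}
		// Another writer won the race: reload the latest index and retry.
		if index, err = s.GetIndex(key); err != nil {
			return err
		}
	}
}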
func (ncfg *networkConfiguration) MarshalJSON() ([]byte, error) {
nMap := make(map[string]interface{})
nMap["ID"] = ncfg.ID
nMap["BridgeName"] = ncfg.BridgeName
nMap["BridgeNameInternal"] = ncfg.BridgeNameInternal
nMap["EnableIPv6"] = ncfg.EnableIPv6
nMap["EnableIPMasquerade"] = ncfg.EnableIPMasquerade
nMap["EnableICC"] = ncfg.EnableICC
nMap["Mtu"] = ncfg.Mtu
nMap["Internal"] = ncfg.Internal
nMap["DefaultBridge"] = ncfg.DefaultBridge
nMap["DefaultBindingIP"] = ncfg.DefaultBindingIP.String()
nMap["DefaultBindingIntf"] = ncfg.DefaultBindingIntf
nMap["DefaultGatewayIPv4"] = ncfg.DefaultGatewayIPv4.String()
nMap["DefaultGatewayIPv6"] = ncfg.DefaultGatewayIPv6.String()
if ncfg.AddressIPv4 != nil {
nMap["AddressIPv4"] = ncfg.AddressIPv4.String()
}
if ncfg.AddressIPv6 != nil {
nMap["AddressIPv6"] = ncfg.AddressIPv6.String()
}
return json.Marshal(nMap)
}
func (ncfg *networkConfiguration) UnmarshalJSON(b []byte) error {
var (
err error
nMap map[string]interface{}
)
if err = json.Unmarshal(b, &nMap); err != nil {
return err
}
if v, ok := nMap["AddressIPv4"]; ok {
if ncfg.AddressIPv4, err = types.ParseCIDR(v.(string)); err != nil {
return types.InternalErrorf("failed to decode bridge network address IPv4 after json unmarshal: %s", v.(string))
}
}
if v, ok := nMap["AddressIPv6"]; ok {
if ncfg.AddressIPv6, err = types.ParseCIDR(v.(string)); err != nil {
return types.InternalErrorf("failed to decode bridge network address IPv6 after json unmarshal: %s", v.(string))
}
}
ncfg.DefaultBridge = nMap["DefaultBridge"].(bool)
ncfg.DefaultBindingIP = net.ParseIP(nMap["DefaultBindingIP"].(string))
ncfg.DefaultBindingIntf = nMap["DefaultBindingIntf"].(string)
ncfg.DefaultGatewayIPv4 = net.ParseIP(nMap["DefaultGatewayIPv4"].(string))
ncfg.DefaultGatewayIPv6 = net.ParseIP(nMap["DefaultGatewayIPv6"].(string))
ncfg.ID = nMap["ID"].(string)
ncfg.BridgeName = nMap["BridgeName"].(string)
ncfg.BridgeNameInternal = nMap["BridgeNameInternal"].(string)
ncfg.EnableIPv6 = nMap["EnableIPv6"].(bool)
ncfg.EnableIPMasquerade = nMap["EnableIPMasquerade"].(bool)
ncfg.EnableICC = nMap["EnableICC"].(bool)
ncfg.Mtu = int(nMap["Mtu"].(float64))
if v, ok := nMap["Internal"]; ok {
ncfg.Internal = v.(bool)
}
return nil
}
func (ncfg *networkConfiguration) Key() []string {
return []string{bridgePrefix, ncfg.ID}
}
func (ncfg *networkConfiguration) KeyPrefix() []string {
return []string{bridgePrefix}
}
func (ncfg *networkConfiguration) Value() []byte {
b, err := json.Marshal(ncfg)
if err != nil {
return nil
}
return b
}
func (ncfg *networkConfiguration) SetValue(value []byte) error {
return json.Unmarshal(value, ncfg)
}
func (ncfg *networkConfiguration) Index() uint64 {
return ncfg.dbIndex
}
func (ncfg *networkConfiguration) SetIndex(index uint64) {
ncfg.dbIndex = index
ncfg.dbExists = true
}
func (ncfg *networkConfiguration) Exists() bool {
return ncfg.dbExists
}
func (ncfg *networkConfiguration) Skip() bool {
return false
}
func (ncfg *networkConfiguration) New() datastore.KVObject {
return &networkConfiguration{}
}
func (ncfg *networkConfiguration) CopyTo(o datastore.KVObject) error {
dstNcfg := o.(*networkConfiguration)
*dstNcfg = *ncfg
return nil
}
func (ncfg *networkConfiguration) DataScope() string {
return datastore.LocalScope
}
func (ep *bridgeEndpoint) MarshalJSON() ([]byte, error) {
epMap := make(map[string]interface{})
epMap["id"] = ep.id
epMap["nid"] = ep.nid
epMap["SrcName"] = ep.srcName
epMap["MacAddress"] = ep.macAddress.String()
epMap["Addr"] = ep.addr.String()
if ep.addrv6 != nil {
epMap["Addrv6"] = ep.addrv6.String()
}
epMap["Config"] = ep.config
epMap["ContainerConfig"] = ep.containerConfig
epMap["ExternalConnConfig"] = ep.extConnConfig
epMap["PortMapping"] = ep.portMapping
return json.Marshal(epMap)
}
func (ep *bridgeEndpoint) UnmarshalJSON(b []byte) error {
var (
err error
epMap map[string]interface{}
)
if err = json.Unmarshal(b, &epMap); err != nil {
return fmt.Errorf("Failed to unmarshal to bridge endpoint: %v", err)
}
if v, ok := epMap["MacAddress"]; ok {
if ep.macAddress, err = net.ParseMAC(v.(string)); err != nil {
return types.InternalErrorf("failed to decode bridge endpoint MAC address (%s) after json unmarshal: %v", v.(string), err)
}
}
if v, ok := epMap["Addr"]; ok {
if ep.addr, err = types.ParseCIDR(v.(string)); err != nil {
return types.InternalErrorf("failed to decode bridge endpoint IPv4 address (%s) after json unmarshal: %v", v.(string), err)
}
}
if v, ok := epMap["Addrv6"]; ok {
if ep.addrv6, err = types.ParseCIDR(v.(string)); err != nil {
return types.InternalErrorf("failed to decode bridge endpoint IPv6 address (%s) after json unmarshal: %v", v.(string), err)
}
}
ep.id = epMap["id"].(string)
ep.nid = epMap["nid"].(string)
ep.srcName = epMap["SrcName"].(string)
d, _ := json.Marshal(epMap["Config"])
if err := json.Unmarshal(d, &ep.config); err != nil {
logrus.Warnf("Failed to decode endpoint config %v", err)
}
d, _ = json.Marshal(epMap["ContainerConfig"])
if err := json.Unmarshal(d, &ep.containerConfig); err != nil {
logrus.Warnf("Failed to decode endpoint container config %v", err)
}
d, _ = json.Marshal(epMap["ExternalConnConfig"])
if err := json.Unmarshal(d, &ep.extConnConfig); err != nil {
logrus.Warnf("Failed to decode endpoint external connectivity configuration %v", err)
}
d, _ = json.Marshal(epMap["PortMapping"])
if err := json.Unmarshal(d, &ep.portMapping); err != nil {
logrus.Warnf("Failed to decode endpoint port mapping %v", err)
}
return nil
}
func (ep *bridgeEndpoint) Key() []string {
return []string{bridgeEndpointPrefix, ep.id}
}
func (ep *bridgeEndpoint) KeyPrefix() []string {
return []string{bridgeEndpointPrefix}
}
func (ep *bridgeEndpoint) Value() []byte {
b, err := json.Marshal(ep)
if err != nil {
return nil
}
return b
}
func (ep *bridgeEndpoint) SetValue(value []byte) error {
return json.Unmarshal(value, ep)
}
func (ep *bridgeEndpoint) Index() uint64 {
return ep.dbIndex
}
func (ep *bridgeEndpoint) SetIndex(index uint64) {
ep.dbIndex = index
ep.dbExists = true
}
func (ep *bridgeEndpoint) Exists() bool {
return ep.dbExists
}
func (ep *bridgeEndpoint) Skip() bool {
return false
}
func (ep *bridgeEndpoint) New() datastore.KVObject {
return &bridgeEndpoint{}
}
func (ep *bridgeEndpoint) CopyTo(o datastore.KVObject) error {
dstEp := o.(*bridgeEndpoint)
*dstEp = *ep
return nil
}
func (ep *bridgeEndpoint) DataScope() string {
return datastore.LocalScope
}
func (n *bridgeNetwork) restorePortAllocations(ep *bridgeEndpoint) {
if ep.extConnConfig == nil ||
ep.extConnConfig.ExposedPorts == nil ||
ep.extConnConfig.PortBindings == nil {
return
}
tmp := ep.extConnConfig.PortBindings
ep.extConnConfig.PortBindings = ep.portMapping
_, err := n.allocatePorts(ep, n.config.DefaultBindingIntf, n.config.DefaultBindingIP, n.driver.config.EnableUserlandProxy)
if err != nil {
logrus.Warnf("Failed to reserve existing port mapping for endpoint %s:%v", ep.id[0:7], err)
}
ep.extConnConfig.PortBindings = tmp
}


@@ -1,119 +0,0 @@
package bridge
import "fmt"
// ErrInvalidEndpointConfig error is returned when an endpoint create is attempted with an invalid endpoint configuration.
type ErrInvalidEndpointConfig struct{}
func (eiec *ErrInvalidEndpointConfig) Error() string {
return "trying to create an endpoint with an invalid endpoint configuration"
}
// BadRequest denotes the type of this error
func (eiec *ErrInvalidEndpointConfig) BadRequest() {}
// ErrNoIPAddr error is returned when bridge has no IPv4 address configured.
type ErrNoIPAddr struct{}
func (enip *ErrNoIPAddr) Error() string {
return "bridge has no IPv4 address configured"
}
// InternalError denotes the type of this error
func (enip *ErrNoIPAddr) InternalError() {}
// ErrInvalidGateway is returned when the user-provided default gateway (v4/v6) is not valid.
type ErrInvalidGateway struct{}
func (eig *ErrInvalidGateway) Error() string {
return "default gateway ip must be part of the network"
}
// BadRequest denotes the type of this error
func (eig *ErrInvalidGateway) BadRequest() {}
// ErrInvalidMtu is returned when the user provided MTU is not valid.
type ErrInvalidMtu int
func (eim ErrInvalidMtu) Error() string {
return fmt.Sprintf("invalid MTU number: %d", int(eim))
}
// BadRequest denotes the type of this error
func (eim ErrInvalidMtu) BadRequest() {}
// ErrUnsupportedAddressType is returned when the specified address type is not supported.
type ErrUnsupportedAddressType string
func (uat ErrUnsupportedAddressType) Error() string {
return fmt.Sprintf("unsupported address type: %s", string(uat))
}
// BadRequest denotes the type of this error
func (uat ErrUnsupportedAddressType) BadRequest() {}
// ActiveEndpointsError is returned when there are
// still active endpoints in the network being deleted.
type ActiveEndpointsError string
func (aee ActiveEndpointsError) Error() string {
return fmt.Sprintf("network %s has active endpoint", string(aee))
}
// Forbidden denotes the type of this error
func (aee ActiveEndpointsError) Forbidden() {}
// InvalidNetworkIDError is returned when the passed
// network id for an existing network is not a known id.
type InvalidNetworkIDError string
func (inie InvalidNetworkIDError) Error() string {
return fmt.Sprintf("invalid network id %s", string(inie))
}
// NotFound denotes the type of this error
func (inie InvalidNetworkIDError) NotFound() {}
// InvalidEndpointIDError is returned when the passed
// endpoint id is not valid.
type InvalidEndpointIDError string
func (ieie InvalidEndpointIDError) Error() string {
return fmt.Sprintf("invalid endpoint id: %s", string(ieie))
}
// BadRequest denotes the type of this error
func (ieie InvalidEndpointIDError) BadRequest() {}
// EndpointNotFoundError is returned when no endpoint
// with the passed endpoint id is found.
type EndpointNotFoundError string
func (enfe EndpointNotFoundError) Error() string {
return fmt.Sprintf("endpoint not found: %s", string(enfe))
}
// NotFound denotes the type of this error
func (enfe EndpointNotFoundError) NotFound() {}
// NonDefaultBridgeExistError is returned when a non-default
// bridge config is passed but it does not already exist.
type NonDefaultBridgeExistError string
func (ndbee NonDefaultBridgeExistError) Error() string {
return fmt.Sprintf("bridge device with non default name %s must be created manually", string(ndbee))
}
// Forbidden denotes the type of this error
func (ndbee NonDefaultBridgeExistError) Forbidden() {}
// NonDefaultBridgeNeedsIPError is returned when a non-default
// bridge config is passed but it has no ip configured
type NonDefaultBridgeNeedsIPError string
func (ndbee NonDefaultBridgeNeedsIPError) Error() string {
return fmt.Sprintf("bridge device with non default name %s must have a valid IP address", string(ndbee))
}
// Forbidden denotes the type of this error
func (ndbee NonDefaultBridgeNeedsIPError) Forbidden() {}


@@ -1,225 +0,0 @@
// +build solaris
package bridge
import (
"bytes"
"errors"
"fmt"
"net"
"os"
"os/exec"
"github.com/docker/libnetwork/types"
"github.com/sirupsen/logrus"
)
var (
defaultBindingIP = net.IPv4(0, 0, 0, 0)
)
const (
maxAllocatePortAttempts = 10
)
func addPFRules(epid, bindIntf string, bs []types.PortBinding) {
var id string
if len(epid) > 12 {
id = epid[:12]
} else {
id = epid
}
fname := "/var/lib/docker/network/files/pf." + id
f, err := os.OpenFile(fname,
os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
if err != nil {
logrus.Warn("cannot open temp pf file")
return
}
for _, b := range bs {
r := fmt.Sprintf(
"pass in on %s proto %s from any to (%s) "+
"port %d rdr-to %s port %d\n", bindIntf,
b.Proto.String(), bindIntf, b.HostPort,
b.IP.String(), b.Port)
_, err = f.WriteString(r)
if err != nil {
logrus.Warnf("cannot write firewall rules to %s: %v", fname, err)
}
}
f.Close()
anchor := fmt.Sprintf("_auto/docker/ep%s", id)
err = exec.Command("/usr/sbin/pfctl", "-a", anchor, "-f", fname).Run()
if err != nil {
logrus.Warnf("failed to add firewall rules: %v", err)
}
os.Remove(fname)
}
func removePFRules(epid string) {
var id string
if len(epid) > 12 {
id = epid[:12]
} else {
id = epid
}
anchor := fmt.Sprintf("_auto/docker/ep%s", id)
err := exec.Command("/usr/sbin/pfctl", "-a", anchor, "-F", "all").Run()
if err != nil {
logrus.Warnf("failed to remove firewall rules: %v", err)
}
}
func (n *bridgeNetwork) allocatePorts(ep *bridgeEndpoint, bindIntf string, reqDefBindIP net.IP, ulPxyEnabled bool) ([]types.PortBinding, error) {
if ep.extConnConfig == nil || ep.extConnConfig.PortBindings == nil {
return nil, nil
}
defHostIP := defaultBindingIP
if reqDefBindIP != nil {
defHostIP = reqDefBindIP
}
bs, err := n.allocatePortsInternal(ep.extConnConfig.PortBindings, bindIntf, ep.addr.IP, defHostIP, ulPxyEnabled)
if err != nil {
return nil, err
}
// Add PF rules for port bindings, if any
if len(bs) > 0 {
addPFRules(ep.id, bindIntf, bs)
}
return bs, err
}
func (n *bridgeNetwork) allocatePortsInternal(bindings []types.PortBinding, bindIntf string, containerIP, defHostIP net.IP, ulPxyEnabled bool) ([]types.PortBinding, error) {
bs := make([]types.PortBinding, 0, len(bindings))
for _, c := range bindings {
b := c.GetCopy()
if err := n.allocatePort(&b, containerIP, defHostIP); err != nil {
// On allocation failure, release previously
// allocated ports. On cleanup error, just log
// a warning message
if cuErr := n.releasePortsInternal(bs); cuErr != nil {
logrus.Warnf("Upon allocation failure "+
"for %v, failed to clear previously "+
"allocated port bindings: %v", b, cuErr)
}
return nil, err
}
bs = append(bs, b)
}
return bs, nil
}
func (n *bridgeNetwork) allocatePort(bnd *types.PortBinding, containerIP, defHostIP net.IP) error {
var (
host net.Addr
err error
)
// Store the container interface address in the operational binding
bnd.IP = containerIP
// Adjust the host address in the operational binding
if len(bnd.HostIP) == 0 {
bnd.HostIP = defHostIP
}
// Adjust HostPortEnd if this is not a range.
if bnd.HostPortEnd == 0 {
bnd.HostPortEnd = bnd.HostPort
}
// Construct the container side transport address
container, err := bnd.ContainerAddr()
if err != nil {
return err
}
// Try up to maxAllocatePortAttempts times to get a port that's
// not already allocated.
for i := 0; i < maxAllocatePortAttempts; i++ {
if host, err = n.portMapper.MapRange(container, bnd.HostIP,
int(bnd.HostPort), int(bnd.HostPortEnd), false); err == nil {
break
}
// There is no point in immediately retrying to map an
// explicitly chosen port.
if bnd.HostPort != 0 {
logrus.Warnf(
"Failed to allocate and map port %d-%d: %s",
bnd.HostPort, bnd.HostPortEnd, err)
break
}
logrus.Warnf("Failed to allocate and map port: %s, retry: %d",
err, i+1)
}
if err != nil {
return err
}
// Save the host port (regardless of whether it was specified in the
// binding)
switch netAddr := host.(type) {
case *net.TCPAddr:
bnd.HostPort = uint16(host.(*net.TCPAddr).Port)
return nil
case *net.UDPAddr:
bnd.HostPort = uint16(host.(*net.UDPAddr).Port)
return nil
default:
// For completeness
return ErrUnsupportedAddressType(fmt.Sprintf("%T", netAddr))
}
}
func (n *bridgeNetwork) releasePorts(ep *bridgeEndpoint) error {
err := n.releasePortsInternal(ep.portMapping)
if err != nil {
return err
}
// remove rules if there are any port mappings
if len(ep.portMapping) > 0 {
removePFRules(ep.id)
}
return nil
}
func (n *bridgeNetwork) releasePortsInternal(bindings []types.PortBinding) error {
var errorBuf bytes.Buffer
// Attempt to release all port bindings, do not stop on failure
for _, m := range bindings {
if err := n.releasePort(m); err != nil {
errorBuf.WriteString(
fmt.Sprintf(
"\ncould not release %v because of %v",
m, err))
}
}
if errorBuf.Len() != 0 {
return errors.New(errorBuf.String())
}
return nil
}
func (n *bridgeNetwork) releasePort(bnd types.PortBinding) error {
// Construct the host side transport address
host, err := bnd.HostAddr()
if err != nil {
return err
}
return n.portMapper.Unmap(host)
}


@@ -1,274 +0,0 @@
package overlay
import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"hash/fnv"
"net"
"sync"
"github.com/docker/libnetwork/types"
"github.com/sirupsen/logrus"
)
const (
mark = uint32(0xD0C4E3)
timeout = 30
pktExpansion = 26 // SPI(4) + SeqN(4) + IV(8) + PadLength(1) + NextHeader(1) + ICV(8)
)
const (
forward = iota + 1
reverse
bidir
)
type key struct {
value []byte
tag uint32
}
func (k *key) String() string {
if k != nil {
return fmt.Sprintf("(key: %s, tag: 0x%x)", hex.EncodeToString(k.value)[0:5], k.tag)
}
return ""
}
type spi struct {
forward int
reverse int
}
func (s *spi) String() string {
return fmt.Sprintf("SPI(FWD: 0x%x, REV: 0x%x)", uint32(s.forward), uint32(s.reverse))
}
type encrMap struct {
nodes map[string][]*spi
sync.Mutex
}
func (e *encrMap) String() string {
e.Lock()
defer e.Unlock()
b := new(bytes.Buffer)
for k, v := range e.nodes {
b.WriteString("\n")
b.WriteString(k)
b.WriteString(":")
b.WriteString("[")
for _, s := range v {
b.WriteString(s.String())
b.WriteString(",")
}
b.WriteString("]")
}
return b.String()
}
func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal, add bool) error {
logrus.Debugf("checkEncryption(%s, %v, %d, %t)", nid[0:7], rIP, vxlanID, isLocal)
n := d.network(nid)
if n == nil || !n.secure {
return nil
}
if len(d.keys) == 0 {
return types.ForbiddenErrorf("encryption key is not present")
}
lIP := net.ParseIP(d.bindAddress)
aIP := net.ParseIP(d.advertiseAddress)
nodes := map[string]net.IP{}
switch {
case isLocal:
if err := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
if !aIP.Equal(pEntry.vtep) {
nodes[pEntry.vtep.String()] = pEntry.vtep
}
return false
}); err != nil {
logrus.Warnf("Failed to retrieve list of participating nodes in overlay network %s: %v", nid[0:5], err)
}
default:
if len(d.network(nid).endpoints) > 0 {
nodes[rIP.String()] = rIP
}
}
logrus.Debugf("List of nodes: %s", nodes)
if add {
for _, rIP := range nodes {
if err := setupEncryption(lIP, aIP, rIP, vxlanID, d.secMap, d.keys); err != nil {
logrus.Warnf("Failed to program network encryption between %s and %s: %v", lIP, rIP, err)
}
}
} else {
if len(nodes) == 0 {
if err := removeEncryption(lIP, rIP, d.secMap); err != nil {
logrus.Warnf("Failed to remove network encryption between %s and %s: %v", lIP, rIP, err)
}
}
}
return nil
}
func setupEncryption(localIP, advIP, remoteIP net.IP, vni uint32, em *encrMap, keys []*key) error {
logrus.Debugf("Programming encryption for vxlan %d between %s and %s", vni, localIP, remoteIP)
rIPs := remoteIP.String()
indices := make([]*spi, 0, len(keys))
err := programMangle(vni, true)
if err != nil {
logrus.Warn(err)
}
em.Lock()
em.nodes[rIPs] = indices
em.Unlock()
return nil
}
func removeEncryption(localIP, remoteIP net.IP, em *encrMap) error {
return nil
}
func programMangle(vni uint32, add bool) (err error) {
return
}
func buildSPI(src, dst net.IP, st uint32) int {
b := make([]byte, 4)
binary.BigEndian.PutUint32(b, st)
h := fnv.New32a()
h.Write(src)
h.Write(b)
h.Write(dst)
return int(binary.BigEndian.Uint32(h.Sum(nil)))
}
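buildSPI derives a deterministic 32-bit SPI by hashing the source IP, the big-endian state tag, and the destination IP with FNV-32a, so both endpoints of a tunnel compute the same SPI pair independently, without negotiation. A hypothetical usage relying on the declarations in this file (addresses invented):

// Forward and reverse SPIs for one peer pair; the remote node derives
// identical values from the mirrored (src, dst) order.
fwd := buildSPI(net.ParseIP("10.0.0.1"), net.ParseIP("10.0.0.2"), d.keys[0].tag)
rev := buildSPI(net.ParseIP("10.0.0.2"), net.ParseIP("10.0.0.1"), d.keys[0].tag)
s := &spi{forward: fwd, reverse: rev}
_ = s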
func (d *driver) secMapWalk(f func(string, []*spi) ([]*spi, bool)) error {
d.secMap.Lock()
for node, indices := range d.secMap.nodes {
idxs, stop := f(node, indices)
if idxs != nil {
d.secMap.nodes[node] = idxs
}
if stop {
break
}
}
d.secMap.Unlock()
return nil
}
func (d *driver) setKeys(keys []*key) error {
if d.keys != nil {
return types.ForbiddenErrorf("initial keys are already present")
}
d.keys = keys
logrus.Debugf("Initial encryption keys: %v", d.keys)
return nil
}
// updateKeys allows adding a new key and/or changing the primary key and/or pruning an existing key
// The primary key is the key used in transmission and will go in first position in the list.
func (d *driver) updateKeys(newKey, primary, pruneKey *key) error {
logrus.Debugf("Updating Keys. New: %v, Primary: %v, Pruned: %v", newKey, primary, pruneKey)
logrus.Debugf("Current: %v", d.keys)
var (
newIdx = -1
priIdx = -1
delIdx = -1
lIP = net.ParseIP(d.bindAddress)
)
d.Lock()
// add new
if newKey != nil {
d.keys = append(d.keys, newKey)
newIdx += len(d.keys)
}
for i, k := range d.keys {
if primary != nil && k.tag == primary.tag {
priIdx = i
}
if pruneKey != nil && k.tag == pruneKey.tag {
delIdx = i
}
}
d.Unlock()
if (newKey != nil && newIdx == -1) ||
(primary != nil && priIdx == -1) ||
(pruneKey != nil && delIdx == -1) {
err := types.BadRequestErrorf("cannot find proper key indices while processing key update:"+
"(newIdx,priIdx,delIdx):(%d, %d, %d)", newIdx, priIdx, delIdx)
logrus.Warn(err)
return err
}
d.secMapWalk(func(rIPs string, spis []*spi) ([]*spi, bool) {
rIP := net.ParseIP(rIPs)
return updateNodeKey(lIP, rIP, spis, d.keys, newIdx, priIdx, delIdx), false
})
d.Lock()
// swap primary
if priIdx != -1 {
swp := d.keys[0]
d.keys[0] = d.keys[priIdx]
d.keys[priIdx] = swp
}
// prune
if delIdx != -1 {
if delIdx == 0 {
delIdx = priIdx
}
d.keys = append(d.keys[:delIdx], d.keys[delIdx+1:]...)
}
d.Unlock()
logrus.Debugf("Updated: %v", d.keys)
return nil
}
/********************************************************
* Steady state: rSA0, rSA1, rSA2, fSA1, fSP1
* Rotation --> -rSA0, +rSA3, +fSA2, +fSP2/-fSP1, -fSA1
* Steady state: rSA1, rSA2, rSA3, fSA2, fSP2
*********************************************************/
// Spis and keys are sorted in such a way that the one in position 0 is the primary
func updateNodeKey(lIP, rIP net.IP, idxs []*spi, curKeys []*key, newIdx, priIdx, delIdx int) []*spi {
logrus.Debugf("Updating keys for node: %s (%d,%d,%d)", rIP, newIdx, priIdx, delIdx)
return nil
}
func (n *network) maxMTU() int {
mtu := 1500
if n.mtu != 0 {
mtu = n.mtu
}
mtu -= vxlanEncap
if n.secure {
// In case of encryption account for the
// ESP packet expansion and padding
mtu -= pktExpansion
mtu -= (mtu % 4)
}
return mtu
}
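With the constants defined in this driver (vxlanEncap = 50, pktExpansion = 26), the computation above works out as follows for the default 1500-byte MTU: a plain overlay network gets 1500 - 50 = 1450 bytes, and an encrypted one gets 1450 - 26 = 1424, which is already a multiple of 4, so the final alignment step subtracts nothing.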


@@ -1,188 +0,0 @@
package overlay
import (
"fmt"
"net"
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/types"
"github.com/gogo/protobuf/proto"
"github.com/sirupsen/logrus"
)
// Join method is invoked when a Sandbox is attached to an endpoint.
func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
if err := validateID(nid, eid); err != nil {
return err
}
n := d.network(nid)
if n == nil {
return fmt.Errorf("could not find network with id %s", nid)
}
ep := n.endpoint(eid)
if ep == nil {
return fmt.Errorf("could not find endpoint with id %s", eid)
}
if n.secure && len(d.keys) == 0 {
return fmt.Errorf("cannot join secure network: encryption keys not present")
}
s := n.getSubnetforIP(ep.addr)
if s == nil {
return fmt.Errorf("could not find subnet for endpoint %s", eid)
}
if err := n.obtainVxlanID(s); err != nil {
return fmt.Errorf("couldn't get vxlan id for %q: %v", s.subnetIP.String(), err)
}
if err := n.joinSandbox(false); err != nil {
return fmt.Errorf("network sandbox join failed: %v", err)
}
if err := n.joinSubnetSandbox(s, false); err != nil {
return fmt.Errorf("subnet sandbox join failed for %q: %v", s.subnetIP.String(), err)
}
// joinSubnetSandbox gets called when an endpoint comes up on a new subnet in the
// overlay network. Hence the Endpoint count should be updated outside joinSubnetSandbox
n.incEndpointCount()
// TODO: add creation of a veth pair for Solaris
containerIfName := "solaris-if"
ep.ifName = containerIfName
if err := d.writeEndpointToStore(ep); err != nil {
return fmt.Errorf("failed to update overlay endpoint %s to local data store: %v", ep.id[0:7], err)
}
// TODO: add Solaris plumbing to attach the veth (with the endpoint MAC address) to the sandbox
for _, sub := range n.subnets {
if sub == s {
continue
}
if err := jinfo.AddStaticRoute(sub.subnetIP, types.NEXTHOP, s.gwIP.IP); err != nil {
logrus.Errorf("Adding subnet %s static route in network %q failed\n", s.subnetIP, n.id)
}
}
if iNames := jinfo.InterfaceName(); iNames != nil {
err := iNames.SetNames(containerIfName, "eth")
if err != nil {
return err
}
}
d.peerDbAdd(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac,
net.ParseIP(d.advertiseAddress), true)
if err := d.checkEncryption(nid, nil, n.vxlanID(s), true, true); err != nil {
logrus.Warn(err)
}
buf, err := proto.Marshal(&PeerRecord{
EndpointIP: ep.addr.String(),
EndpointMAC: ep.mac.String(),
TunnelEndpointIP: d.advertiseAddress,
})
if err != nil {
return err
}
if err := jinfo.AddTableEntry(ovPeerTable, eid, buf); err != nil {
logrus.Errorf("overlay: Failed adding table entry to joininfo: %v", err)
}
d.pushLocalEndpointEvent("join", nid, eid)
return nil
}
func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) {
if tableName != ovPeerTable {
logrus.Errorf("Unexpected table notification for table %s received", tableName)
return
}
eid := key
var peer PeerRecord
if err := proto.Unmarshal(value, &peer); err != nil {
logrus.Errorf("Failed to unmarshal peer record: %v", err)
return
}
// Ignore local peers. We already know about them and they
// should not be added to vxlan fdb.
if peer.TunnelEndpointIP == d.advertiseAddress {
return
}
addr, err := types.ParseCIDR(peer.EndpointIP)
if err != nil {
logrus.Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP)
return
}
mac, err := net.ParseMAC(peer.EndpointMAC)
if err != nil {
logrus.Errorf("Invalid mac %s received in event notify", peer.EndpointMAC)
return
}
vtep := net.ParseIP(peer.TunnelEndpointIP)
if vtep == nil {
logrus.Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP)
return
}
if etype == driverapi.Delete {
d.peerDelete(nid, eid, addr.IP, addr.Mask, mac, vtep, true)
return
}
d.peerAdd(nid, eid, addr.IP, addr.Mask, mac, vtep, true)
}
func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) {
return "", nil
}
// Leave method is invoked when a Sandbox detaches from an endpoint.
func (d *driver) Leave(nid, eid string) error {
if err := validateID(nid, eid); err != nil {
return err
}
n := d.network(nid)
if n == nil {
return fmt.Errorf("could not find network with id %s", nid)
}
ep := n.endpoint(eid)
if ep == nil {
return types.InternalMaskableErrorf("could not find endpoint with id %s", eid)
}
if d.notifyCh != nil {
d.notifyCh <- ovNotify{
action: "leave",
nw: n,
ep: ep,
}
}
n.leaveSandbox()
if err := d.checkEncryption(nid, nil, 0, true, false); err != nil {
logrus.Warn(err)
}
return nil
}


@@ -1,242 +0,0 @@
package overlay
import (
"encoding/json"
"fmt"
"net"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/netutils"
"github.com/docker/libnetwork/types"
"github.com/sirupsen/logrus"
)
type endpointTable map[string]*endpoint
const overlayEndpointPrefix = "overlay/endpoint"
type endpoint struct {
id string
nid string
ifName string
mac net.HardwareAddr
addr *net.IPNet
dbExists bool
dbIndex uint64
}
func (n *network) endpoint(eid string) *endpoint {
n.Lock()
defer n.Unlock()
return n.endpoints[eid]
}
func (n *network) addEndpoint(ep *endpoint) {
n.Lock()
n.endpoints[ep.id] = ep
n.Unlock()
}
func (n *network) deleteEndpoint(eid string) {
n.Lock()
delete(n.endpoints, eid)
n.Unlock()
}
func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
epOptions map[string]interface{}) error {
var err error
if err = validateID(nid, eid); err != nil {
return err
}
// Since we perform lazy configuration make sure we try
// configuring the driver when we enter CreateEndpoint since
// CreateNetwork may not be called in every node.
if err := d.configure(); err != nil {
return err
}
n := d.network(nid)
if n == nil {
return fmt.Errorf("network id %q not found", nid)
}
ep := &endpoint{
id: eid,
nid: n.id,
addr: ifInfo.Address(),
mac: ifInfo.MacAddress(),
}
if ep.addr == nil {
return fmt.Errorf("create endpoint was not passed interface IP address")
}
if s := n.getSubnetforIP(ep.addr); s == nil {
return fmt.Errorf("no matching subnet for IP %q in network %q", ep.addr, nid)
}
if ep.mac == nil {
ep.mac = netutils.GenerateMACFromIP(ep.addr.IP)
if err := ifInfo.SetMacAddress(ep.mac); err != nil {
return err
}
}
n.addEndpoint(ep)
if err := d.writeEndpointToStore(ep); err != nil {
return fmt.Errorf("failed to update overlay endpoint %s to local store: %v", ep.id[0:7], err)
}
return nil
}
func (d *driver) DeleteEndpoint(nid, eid string) error {
if err := validateID(nid, eid); err != nil {
return err
}
n := d.network(nid)
if n == nil {
return fmt.Errorf("network id %q not found", nid)
}
ep := n.endpoint(eid)
if ep == nil {
return fmt.Errorf("endpoint id %q not found", eid)
}
n.deleteEndpoint(eid)
if err := d.deleteEndpointFromStore(ep); err != nil {
logrus.Warnf("Failed to delete overlay endpoint %s from local store: %v", ep.id[0:7], err)
}
if ep.ifName == "" {
return nil
}
// OVERLAY_SOLARIS: Add Solaris unplumbing for removing the interface endpoint
return nil
}
func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) {
return make(map[string]interface{}, 0), nil
}
func (d *driver) deleteEndpointFromStore(e *endpoint) error {
if d.localStore == nil {
return fmt.Errorf("overlay local store not initialized, ep not deleted")
}
return d.localStore.DeleteObjectAtomic(e)
}
func (d *driver) writeEndpointToStore(e *endpoint) error {
if d.localStore == nil {
return fmt.Errorf("overlay local store not initialized, ep not added")
}
return d.localStore.PutObjectAtomic(e)
}
func (ep *endpoint) DataScope() string {
return datastore.LocalScope
}
func (ep *endpoint) New() datastore.KVObject {
return &endpoint{}
}
func (ep *endpoint) CopyTo(o datastore.KVObject) error {
dstep := o.(*endpoint)
*dstep = *ep
return nil
}
func (ep *endpoint) Key() []string {
return []string{overlayEndpointPrefix, ep.id}
}
func (ep *endpoint) KeyPrefix() []string {
return []string{overlayEndpointPrefix}
}
func (ep *endpoint) Index() uint64 {
return ep.dbIndex
}
func (ep *endpoint) SetIndex(index uint64) {
ep.dbIndex = index
ep.dbExists = true
}
func (ep *endpoint) Exists() bool {
return ep.dbExists
}
func (ep *endpoint) Skip() bool {
return false
}
func (ep *endpoint) Value() []byte {
b, err := json.Marshal(ep)
if err != nil {
return nil
}
return b
}
func (ep *endpoint) SetValue(value []byte) error {
return json.Unmarshal(value, ep)
}
func (ep *endpoint) MarshalJSON() ([]byte, error) {
epMap := make(map[string]interface{})
epMap["id"] = ep.id
epMap["nid"] = ep.nid
if ep.ifName != "" {
epMap["ifName"] = ep.ifName
}
if ep.addr != nil {
epMap["addr"] = ep.addr.String()
}
if len(ep.mac) != 0 {
epMap["mac"] = ep.mac.String()
}
return json.Marshal(epMap)
}
func (ep *endpoint) UnmarshalJSON(value []byte) error {
var (
err error
epMap map[string]interface{}
)
json.Unmarshal(value, &epMap)
ep.id = epMap["id"].(string)
ep.nid = epMap["nid"].(string)
if v, ok := epMap["mac"]; ok {
if ep.mac, err = net.ParseMAC(v.(string)); err != nil {
return types.InternalErrorf("failed to decode endpoint interface mac address after json unmarshal: %s", v.(string))
}
}
if v, ok := epMap["addr"]; ok {
if ep.addr, err = types.ParseCIDR(v.(string)); err != nil {
return types.InternalErrorf("failed to decode endpoint interface ipv4 address after json unmarshal: %v", err)
}
}
if v, ok := epMap["ifName"]; ok {
ep.ifName = v.(string)
}
return nil
}


@@ -1,786 +0,0 @@
package overlay
import (
"encoding/json"
"fmt"
"net"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/netutils"
"github.com/docker/libnetwork/osl"
"github.com/docker/libnetwork/resolvconf"
"github.com/docker/libnetwork/types"
"github.com/sirupsen/logrus"
)
var (
hostMode bool
networkOnce sync.Once
networkMu sync.Mutex
vniTbl = make(map[uint32]string)
)
type networkTable map[string]*network
type subnet struct {
once *sync.Once
vxlanName string
brName string
vni uint32
initErr error
subnetIP *net.IPNet
gwIP *net.IPNet
}
type subnetJSON struct {
SubnetIP string
GwIP string
Vni uint32
}
type network struct {
id string
dbIndex uint64
dbExists bool
sbox osl.Sandbox
endpoints endpointTable
driver *driver
joinCnt int
once *sync.Once
initEpoch int
initErr error
subnets []*subnet
secure bool
mtu int
sync.Mutex
}
func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) {
return nil, types.NotImplementedErrorf("not implemented")
}
func (d *driver) NetworkFree(id string) error {
return types.NotImplementedErrorf("not implemented")
}
func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error {
if id == "" {
return fmt.Errorf("invalid network id")
}
if len(ipV4Data) == 0 || ipV4Data[0].Pool.String() == "0.0.0.0/0" {
return types.BadRequestErrorf("ipv4 pool is empty")
}
// Since we perform lazy configuration make sure we try
// configuring the driver when we enter CreateNetwork
if err := d.configure(); err != nil {
return err
}
n := &network{
id: id,
driver: d,
endpoints: endpointTable{},
once: &sync.Once{},
subnets: []*subnet{},
}
vnis := make([]uint32, 0, len(ipV4Data))
if gval, ok := option[netlabel.GenericData]; ok {
optMap := gval.(map[string]string)
if val, ok := optMap[netlabel.OverlayVxlanIDList]; ok {
logrus.Debugf("overlay: Received vxlan IDs: %s", val)
vniStrings := strings.Split(val, ",")
for _, vniStr := range vniStrings {
vni, err := strconv.Atoi(vniStr)
if err != nil {
return fmt.Errorf("invalid vxlan id value %q passed", vniStr)
}
vnis = append(vnis, uint32(vni))
}
}
if _, ok := optMap[secureOption]; ok {
n.secure = true
}
if val, ok := optMap[netlabel.DriverMTU]; ok {
var err error
if n.mtu, err = strconv.Atoi(val); err != nil {
return fmt.Errorf("failed to parse %v: %v", val, err)
}
if n.mtu < 0 {
return fmt.Errorf("invalid MTU value: %v", n.mtu)
}
}
}
// If we are getting vnis from libnetwork, either we get them for
// all subnets or none.
if len(vnis) != 0 && len(vnis) < len(ipV4Data) {
return fmt.Errorf("insufficient vnis(%d) passed to overlay", len(vnis))
}
for i, ipd := range ipV4Data {
s := &subnet{
subnetIP: ipd.Pool,
gwIP: ipd.Gateway,
once: &sync.Once{},
}
if len(vnis) != 0 {
s.vni = vnis[i]
}
n.subnets = append(n.subnets, s)
}
if err := n.writeToStore(); err != nil {
return fmt.Errorf("failed to update data store for network %v: %v", n.id, err)
}
// Make sure no rule is on the way from any stale secure network
if !n.secure {
for _, vni := range vnis {
programMangle(vni, false)
}
}
if nInfo != nil {
if err := nInfo.TableEventRegister(ovPeerTable, driverapi.EndpointObject); err != nil {
return err
}
}
d.addNetwork(n)
return nil
}
func (d *driver) DeleteNetwork(nid string) error {
if nid == "" {
return fmt.Errorf("invalid network id")
}
// Make sure driver resources are initialized before proceeding
if err := d.configure(); err != nil {
return err
}
n := d.network(nid)
if n == nil {
return fmt.Errorf("could not find network with id %s", nid)
}
d.deleteNetwork(nid)
vnis, err := n.releaseVxlanID()
if err != nil {
return err
}
if n.secure {
for _, vni := range vnis {
programMangle(vni, false)
}
}
return nil
}
func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error {
return nil
}
func (d *driver) RevokeExternalConnectivity(nid, eid string) error {
return nil
}
func (n *network) incEndpointCount() {
n.Lock()
defer n.Unlock()
n.joinCnt++
}
func (n *network) joinSandbox(restore bool) error {
// If there is a race between two goroutines here, only one will win;
// the other will wait.
n.once.Do(func() {
// save the error status of initSandbox in n.initErr so that
// all the racing goroutines are able to know the status.
n.initErr = n.initSandbox(restore)
})
return n.initErr
}
func (n *network) joinSubnetSandbox(s *subnet, restore bool) error {
s.once.Do(func() {
s.initErr = n.initSubnetSandbox(s, restore)
})
return s.initErr
}
func (n *network) leaveSandbox() {
n.Lock()
defer n.Unlock()
n.joinCnt--
if n.joinCnt != 0 {
return
}
// We are about to destroy the sandbox since the container is leaving the network.
// Reinitialize the once variable so that we will be able to trigger one-time
// sandbox initialization (again) when another container joins subsequently.
n.once = &sync.Once{}
for _, s := range n.subnets {
s.once = &sync.Once{}
}
n.destroySandbox()
}
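joinSandbox and leaveSandbox above implement a resettable sync.Once: initialization runs at most once per generation of joiners, every racing joiner observes the same initErr, and the last leaver arms a fresh Once so the next join re-initializes. A minimal self-contained sketch of that pattern (names invented, structure mirroring the code above):

package sketch

import "sync"

// resettableInit runs an init function at most once per generation.
type resettableInit struct {
	sync.Mutex
	once    *sync.Once
	users   int
	initErr error
}

func newResettableInit() *resettableInit {
	return &resettableInit{once: &sync.Once{}}
}

// join initializes on first use; concurrent joiners all see initErr.
func (r *resettableInit) join(init func() error) error {
	r.once.Do(func() { r.initErr = init() })
	r.Lock()
	r.users++
	r.Unlock()
	return r.initErr
}

// leave tears down on last use and arms a fresh Once so a later join
// can trigger one-time initialization again.
func (r *resettableInit) leave(teardown func()) {
	r.Lock()
	defer r.Unlock()
	r.users--
	if r.users != 0 {
		return
	}
	r.once = &sync.Once{}
	teardown()
}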
// to be called while holding network lock
func (n *network) destroySandbox() {
if n.sbox != nil {
for _, iface := range n.sbox.Info().Interfaces() {
if err := iface.Remove(); err != nil {
logrus.Debugf("Remove interface %s failed: %v", iface.SrcName(), err)
}
}
for _, s := range n.subnets {
if s.vxlanName != "" {
err := deleteInterface(s.vxlanName)
if err != nil {
logrus.Warnf("could not cleanup sandbox properly: %v", err)
}
}
}
n.sbox.Destroy()
n.sbox = nil
}
}
func networkOnceInit() {
if os.Getenv("_OVERLAY_HOST_MODE") != "" {
hostMode = true
return
}
err := createVxlan("testvxlan1", 1, 0)
if err != nil {
logrus.Errorf("Failed to create testvxlan1 interface: %v", err)
return
}
defer deleteInterface("testvxlan1")
}
func (n *network) generateVxlanName(s *subnet) string {
id := n.id
if len(n.id) > 12 {
id = n.id[:12]
}
return "vx_" + id + "_0"
}
func (n *network) generateBridgeName(s *subnet) string {
id := n.id
if len(n.id) > 5 {
id = n.id[:5]
}
return n.getBridgeNamePrefix(s) + "_" + id + "_0"
}
func (n *network) getBridgeNamePrefix(s *subnet) string {
return "ov_" + fmt.Sprintf("%06x", n.vxlanID(s))
}
func isOverlap(nw *net.IPNet) bool {
var nameservers []string
if rc, err := resolvconf.Get(); err == nil {
nameservers = resolvconf.GetNameserversAsCIDR(rc.Content)
}
if err := netutils.CheckNameserverOverlaps(nameservers, nw); err != nil {
return true
}
if err := netutils.CheckRouteOverlaps(nw); err != nil {
return true
}
return false
}
func (n *network) restoreSubnetSandbox(s *subnet, brName, vxlanName string) error {
sbox := n.sandbox()
// restore overlay osl sandbox
Ifaces := make(map[string][]osl.IfaceOption)
brIfaceOption := make([]osl.IfaceOption, 0, 2)
brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Address(s.gwIP))
brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Bridge(true))
Ifaces[brName+"+br"] = brIfaceOption
err := sbox.Restore(Ifaces, nil, nil, nil)
if err != nil {
return err
}
Ifaces = make(map[string][]osl.IfaceOption)
vxlanIfaceOption := make([]osl.IfaceOption, 0, 1)
vxlanIfaceOption = append(vxlanIfaceOption, sbox.InterfaceOptions().Master(brName))
Ifaces[vxlanName+"+vxlan"] = vxlanIfaceOption
err = sbox.Restore(Ifaces, nil, nil, nil)
if err != nil {
return err
}
return nil
}
func (n *network) addInterface(srcName, dstPrefix, name string, isBridge bool) error {
return nil
}
func (n *network) setupSubnetSandbox(s *subnet, brName, vxlanName string) error {
if hostMode {
// Try to delete stale bridge interface if it exists
if err := deleteInterface(brName); err != nil {
deleteInterfaceBySubnet(n.getBridgeNamePrefix(s), s)
}
if isOverlap(s.subnetIP) {
return fmt.Errorf("overlay subnet %s has conflicts in the host while running in host mode", s.subnetIP.String())
}
}
if !hostMode {
// Try to find out whether this subnet's vni is being used in some
// other namespace by looking at the vniTbl that we just
// populated in the once init. If a hit is found then
// it must be a stale namespace from a previous
// life. Destroy it completely and reclaim the resources.
networkMu.Lock()
path, ok := vniTbl[n.vxlanID(s)]
networkMu.Unlock()
if ok {
os.Remove(path)
networkMu.Lock()
delete(vniTbl, n.vxlanID(s))
networkMu.Unlock()
}
}
err := createVxlan(vxlanName, n.vxlanID(s), n.maxMTU())
if err != nil {
return err
}
return nil
}
func (n *network) initSubnetSandbox(s *subnet, restore bool) error {
brName := n.generateBridgeName(s)
vxlanName := n.generateVxlanName(s)
if restore {
if err := n.restoreSubnetSandbox(s, brName, vxlanName); err != nil {
return err
}
} else {
if err := n.setupSubnetSandbox(s, brName, vxlanName); err != nil {
return err
}
}
n.Lock()
s.vxlanName = vxlanName
s.brName = brName
n.Unlock()
return nil
}
func (n *network) cleanupStaleSandboxes() {
filepath.Walk(filepath.Dir(osl.GenerateKey("walk")),
func(path string, info os.FileInfo, err error) error {
_, fname := filepath.Split(path)
pList := strings.Split(fname, "-")
if len(pList) <= 1 {
return nil
}
pattern := pList[1]
if strings.Contains(n.id, pattern) {
// Now that we have destroyed this
// sandbox, remove all references to
// it in vniTbl so that we don't
// inadvertently destroy the sandbox
// created in this life.
networkMu.Lock()
for vni, tblPath := range vniTbl {
if tblPath == path {
delete(vniTbl, vni)
}
}
networkMu.Unlock()
}
return nil
})
}
func (n *network) initSandbox(restore bool) error {
n.Lock()
n.initEpoch++
n.Unlock()
networkOnce.Do(networkOnceInit)
if !restore {
// If there are any stale sandboxes related to this network
// from previous daemon life clean it up here
n.cleanupStaleSandboxes()
}
// In the restore case the network sandbox already exists, but we don't know
// what epoch number it was created with. It has to be retrieved by
// searching the net namespaces.
var key string
if restore {
key = osl.GenerateKey("-" + n.id)
} else {
key = osl.GenerateKey(fmt.Sprintf("%d-", n.initEpoch) + n.id)
}
sbox, err := osl.NewSandbox(key, !hostMode, restore)
if err != nil {
return fmt.Errorf("could not get network sandbox (oper %t): %v", restore, err)
}
n.setSandbox(sbox)
if !restore {
n.driver.peerDbUpdateSandbox(n.id)
}
return nil
}
func (d *driver) addNetwork(n *network) {
d.Lock()
d.networks[n.id] = n
d.Unlock()
}
func (d *driver) deleteNetwork(nid string) {
d.Lock()
delete(d.networks, nid)
d.Unlock()
}
func (d *driver) network(nid string) *network {
d.Lock()
networks := d.networks
d.Unlock()
n, ok := networks[nid]
if !ok {
n = d.getNetworkFromStore(nid)
if n != nil {
n.driver = d
n.endpoints = endpointTable{}
n.once = &sync.Once{}
networks[nid] = n
}
}
return n
}
func (d *driver) getNetworkFromStore(nid string) *network {
if d.store == nil {
return nil
}
n := &network{id: nid}
if err := d.store.GetObject(datastore.Key(n.Key()...), n); err != nil {
return nil
}
return n
}
func (n *network) sandbox() osl.Sandbox {
n.Lock()
defer n.Unlock()
return n.sbox
}
func (n *network) setSandbox(sbox osl.Sandbox) {
n.Lock()
n.sbox = sbox
n.Unlock()
}
func (n *network) vxlanID(s *subnet) uint32 {
n.Lock()
defer n.Unlock()
return s.vni
}
func (n *network) setVxlanID(s *subnet, vni uint32) {
n.Lock()
s.vni = vni
n.Unlock()
}
func (n *network) Key() []string {
return []string{"overlay", "network", n.id}
}
func (n *network) KeyPrefix() []string {
return []string{"overlay", "network"}
}
func (n *network) Value() []byte {
m := map[string]interface{}{}
netJSON := []*subnetJSON{}
for _, s := range n.subnets {
sj := &subnetJSON{
SubnetIP: s.subnetIP.String(),
GwIP: s.gwIP.String(),
Vni: s.vni,
}
netJSON = append(netJSON, sj)
}
m["secure"] = n.secure
m["subnets"] = netJSON
m["mtu"] = n.mtu
b, err := json.Marshal(m)
if err != nil {
return []byte{}
}
return b
}
func (n *network) Index() uint64 {
return n.dbIndex
}
func (n *network) SetIndex(index uint64) {
n.dbIndex = index
n.dbExists = true
}
func (n *network) Exists() bool {
return n.dbExists
}
func (n *network) Skip() bool {
return false
}
func (n *network) SetValue(value []byte) error {
var (
m map[string]interface{}
newNet bool
isMap = true
netJSON = []*subnetJSON{}
)
if err := json.Unmarshal(value, &m); err != nil {
err := json.Unmarshal(value, &netJSON)
if err != nil {
return err
}
isMap = false
}
if len(n.subnets) == 0 {
newNet = true
}
if isMap {
if val, ok := m["secure"]; ok {
n.secure = val.(bool)
}
if val, ok := m["mtu"]; ok {
n.mtu = int(val.(float64))
}
bytes, err := json.Marshal(m["subnets"])
if err != nil {
return err
}
if err := json.Unmarshal(bytes, &netJSON); err != nil {
return err
}
}
for _, sj := range netJSON {
subnetIPstr := sj.SubnetIP
gwIPstr := sj.GwIP
vni := sj.Vni
subnetIP, _ := types.ParseCIDR(subnetIPstr)
gwIP, _ := types.ParseCIDR(gwIPstr)
if newNet {
s := &subnet{
subnetIP: subnetIP,
gwIP: gwIP,
vni: vni,
once: &sync.Once{},
}
n.subnets = append(n.subnets, s)
} else {
sNet := n.getMatchingSubnet(subnetIP)
if sNet != nil {
sNet.vni = vni
}
}
}
return nil
}
func (n *network) DataScope() string {
return datastore.GlobalScope
}
func (n *network) writeToStore() error {
if n.driver.store == nil {
return nil
}
return n.driver.store.PutObjectAtomic(n)
}
func (n *network) releaseVxlanID() ([]uint32, error) {
if len(n.subnets) == 0 {
return nil, nil
}
if n.driver.store != nil {
if err := n.driver.store.DeleteObjectAtomic(n); err != nil {
if err == datastore.ErrKeyModified || err == datastore.ErrKeyNotFound {
// In both the above cases we can safely assume that the key has been removed by some other
// instance and so simply get out of here
return nil, nil
}
return nil, fmt.Errorf("failed to delete network to vxlan id map: %v", err)
}
}
var vnis []uint32
for _, s := range n.subnets {
if n.driver.vxlanIdm != nil {
vni := n.vxlanID(s)
vnis = append(vnis, vni)
n.driver.vxlanIdm.Release(uint64(vni))
}
n.setVxlanID(s, 0)
}
return vnis, nil
}
func (n *network) obtainVxlanID(s *subnet) error {
//return if the subnet already has a vxlan id assigned
if s.vni != 0 {
return nil
}
if n.driver.store == nil {
return fmt.Errorf("no valid vxlan id and no datastore configured, cannot obtain vxlan id")
}
for {
if err := n.driver.store.GetObject(datastore.Key(n.Key()...), n); err != nil {
return fmt.Errorf("getting network %q from datastore failed %v", n.id, err)
}
if s.vni == 0 {
vxlanID, err := n.driver.vxlanIdm.GetID(true)
if err != nil {
return fmt.Errorf("failed to allocate vxlan id: %v", err)
}
n.setVxlanID(s, uint32(vxlanID))
if err := n.writeToStore(); err != nil {
n.driver.vxlanIdm.Release(uint64(n.vxlanID(s)))
n.setVxlanID(s, 0)
if err == datastore.ErrKeyModified {
continue
}
return fmt.Errorf("network %q failed to update data store: %v", n.id, err)
}
return nil
}
return nil
}
}
// contains returns true if the passed ip belongs to one of the network's
// subnets
func (n *network) contains(ip net.IP) bool {
for _, s := range n.subnets {
if s.subnetIP.Contains(ip) {
return true
}
}
return false
}
// getSubnetforIP returns the subnet to which the given IP belongs
func (n *network) getSubnetforIP(ip *net.IPNet) *subnet {
for _, s := range n.subnets {
// first check if the mask lengths are the same
i, _ := s.subnetIP.Mask.Size()
j, _ := ip.Mask.Size()
if i != j {
continue
}
if s.subnetIP.Contains(ip.IP) {
return s
}
}
return nil
}
// getMatchingSubnet returns the network's subnet that matches the input
func (n *network) getMatchingSubnet(ip *net.IPNet) *subnet {
if ip == nil {
return nil
}
for _, s := range n.subnets {
// first check if the mask lengths are the same
i, _ := s.subnetIP.Mask.Size()
j, _ := ip.Mask.Size()
if i != j {
continue
}
if s.subnetIP.IP.Equal(ip.IP) {
return s
}
}
return nil
}
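The two lookups differ only in the final comparison: getSubnetforIP matches by containment (the IP falls inside the subnet), while getMatchingSubnet matches by identity (the network addresses are equal), and both first require equal prefix lengths. For example, 10.1.0.5/24 selects a 10.1.0.0/24 subnet via getSubnetforIP, but only an input of exactly 10.1.0.0/24 matches it via getMatchingSubnet.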


@@ -1,233 +0,0 @@
package overlay
import (
"fmt"
"net"
"strings"
"time"
"github.com/hashicorp/serf/serf"
"github.com/sirupsen/logrus"
)
type ovNotify struct {
action string
ep *endpoint
nw *network
}
type logWriter struct{}
func (l *logWriter) Write(p []byte) (int, error) {
str := string(p)
switch {
case strings.Contains(str, "[WARN]"):
logrus.Warn(str)
case strings.Contains(str, "[DEBUG]"):
logrus.Debug(str)
case strings.Contains(str, "[INFO]"):
logrus.Info(str)
case strings.Contains(str, "[ERR]"):
logrus.Error(str)
}
return len(p), nil
}
func (d *driver) serfInit() error {
var err error
config := serf.DefaultConfig()
config.Init()
config.MemberlistConfig.BindAddr = d.advertiseAddress
d.eventCh = make(chan serf.Event, 4)
config.EventCh = d.eventCh
config.UserCoalescePeriod = 1 * time.Second
config.UserQuiescentPeriod = 50 * time.Millisecond
config.LogOutput = &logWriter{}
config.MemberlistConfig.LogOutput = config.LogOutput
s, err := serf.Create(config)
if err != nil {
return fmt.Errorf("failed to create cluster node: %v", err)
}
defer func() {
if err != nil {
s.Shutdown()
}
}()
d.serfInstance = s
d.notifyCh = make(chan ovNotify)
d.exitCh = make(chan chan struct{})
go d.startSerfLoop(d.eventCh, d.notifyCh, d.exitCh)
return nil
}
func (d *driver) serfJoin(neighIP string) error {
if neighIP == "" {
return fmt.Errorf("no neighbor to join")
}
if _, err := d.serfInstance.Join([]string{neighIP}, false); err != nil {
return fmt.Errorf("Failed to join the cluster at neigh IP %s: %v",
neighIP, err)
}
return nil
}
func (d *driver) notifyEvent(event ovNotify) {
ep := event.ep
ePayload := fmt.Sprintf("%s %s %s %s", event.action, ep.addr.IP.String(),
net.IP(ep.addr.Mask).String(), ep.mac.String())
eName := fmt.Sprintf("jl %s %s %s", d.serfInstance.LocalMember().Addr.String(),
event.nw.id, ep.id)
if err := d.serfInstance.UserEvent(eName, []byte(ePayload), true); err != nil {
logrus.Errorf("Sending user event failed: %v\n", err)
}
}
func (d *driver) processEvent(u serf.UserEvent) {
logrus.Debugf("Received user event name:%s, payload:%s\n", u.Name,
string(u.Payload))
var dummy, action, vtepStr, nid, eid, ipStr, maskStr, macStr string
if _, err := fmt.Sscan(u.Name, &dummy, &vtepStr, &nid, &eid); err != nil {
fmt.Printf("Failed to scan name string: %v\n", err)
}
if _, err := fmt.Sscan(string(u.Payload), &action,
&ipStr, &maskStr, &macStr); err != nil {
fmt.Printf("Failed to scan value string: %v\n", err)
}
logrus.Debugf("Parsed data = %s/%s/%s/%s/%s/%s\n", nid, eid, vtepStr, ipStr, maskStr, macStr)
mac, err := net.ParseMAC(macStr)
if err != nil {
logrus.Errorf("Failed to parse mac: %v\n", err)
}
if d.serfInstance.LocalMember().Addr.String() == vtepStr {
return
}
switch action {
case "join":
if err := d.peerAdd(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac,
net.ParseIP(vtepStr), true); err != nil {
logrus.Errorf("Peer add failed in the driver: %v\n", err)
}
case "leave":
if err := d.peerDelete(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac,
net.ParseIP(vtepStr), true); err != nil {
logrus.Errorf("Peer delete failed in the driver: %v\n", err)
}
}
}
func (d *driver) processQuery(q *serf.Query) {
logrus.Debugf("Received query name:%s, payload:%s\n", q.Name,
string(q.Payload))
var nid, ipStr string
if _, err := fmt.Sscan(string(q.Payload), &nid, &ipStr); err != nil {
fmt.Printf("Failed to scan query payload string: %v\n", err)
}
peerMac, peerIPMask, vtep, err := d.peerDbSearch(nid, net.ParseIP(ipStr))
if err != nil {
return
}
q.Respond([]byte(fmt.Sprintf("%s %s %s", peerMac.String(), net.IP(peerIPMask).String(), vtep.String())))
}
func (d *driver) resolvePeer(nid string, peerIP net.IP) (net.HardwareAddr, net.IPMask, net.IP, error) {
if d.serfInstance == nil {
return nil, nil, nil, fmt.Errorf("could not resolve peer: serf instance not initialized")
}
qPayload := fmt.Sprintf("%s %s", string(nid), peerIP.String())
resp, err := d.serfInstance.Query("peerlookup", []byte(qPayload), nil)
if err != nil {
return nil, nil, nil, fmt.Errorf("resolving peer by querying the cluster failed: %v", err)
}
respCh := resp.ResponseCh()
select {
case r := <-respCh:
var macStr, maskStr, vtepStr string
if _, err := fmt.Sscan(string(r.Payload), &macStr, &maskStr, &vtepStr); err != nil {
return nil, nil, nil, fmt.Errorf("bad response %q for the resolve query: %v", string(r.Payload), err)
}
mac, err := net.ParseMAC(macStr)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to parse mac: %v", err)
}
return mac, net.IPMask(net.ParseIP(maskStr).To4()), net.ParseIP(vtepStr), nil
case <-time.After(time.Second):
return nil, nil, nil, fmt.Errorf("timed out resolving peer by querying the cluster")
}
}
func (d *driver) startSerfLoop(eventCh chan serf.Event, notifyCh chan ovNotify,
exitCh chan chan struct{}) {
for {
select {
case notify, ok := <-notifyCh:
if !ok {
break
}
d.notifyEvent(notify)
case ch, ok := <-exitCh:
if !ok {
break
}
if err := d.serfInstance.Leave(); err != nil {
logrus.Errorf("failed leaving the cluster: %v\n", err)
}
d.serfInstance.Shutdown()
close(ch)
return
case e, ok := <-eventCh:
if !ok {
break
}
if e.EventType() == serf.EventQuery {
d.processQuery(e.(*serf.Query))
break
}
u, ok := e.(serf.UserEvent)
if !ok {
break
}
d.processEvent(u)
}
}
}
func (d *driver) isSerfAlive() bool {
d.Lock()
serfInstance := d.serfInstance
d.Unlock()
if serfInstance == nil || serfInstance.State() != serf.SerfAlive {
return false
}
return true
}

View file

@ -1,61 +0,0 @@
package overlay
import (
"fmt"
"os/exec"
"strings"
"github.com/docker/libnetwork/osl"
)
func validateID(nid, eid string) error {
if nid == "" {
return fmt.Errorf("invalid network id")
}
if eid == "" {
return fmt.Errorf("invalid endpoint id")
}
return nil
}
func createVxlan(name string, vni uint32, mtu int) error {
defer osl.InitOSContext()()
// Get default interface to plumb the vxlan on
routeCmd := "/usr/sbin/ipadm show-addr -p -o addrobj " +
"`/usr/sbin/route get default | /usr/bin/grep interface | " +
"/usr/bin/awk '{print $2}'`"
out, err := exec.Command("/usr/bin/bash", "-c", routeCmd).Output()
if err != nil {
return fmt.Errorf("cannot get default route: %v", err)
}
defaultInterface := strings.SplitN(string(out), "/", 2)
propList := fmt.Sprintf("interface=%s,vni=%d", defaultInterface[0], vni)
out, err = exec.Command("/usr/sbin/dladm", "create-vxlan", "-t", "-p", propList,
name).Output()
if err != nil {
return fmt.Errorf("error creating vxlan interface: %v %s", err, out)
}
return nil
}
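// For illustration, with a default interface of net0 and vni 4097 (example
// values; the real ones come from the default route lookup and the network's
// vxlan id), the invocation assembled above is equivalent to:
//
//	/usr/sbin/dladm create-vxlan -t -p interface=net0,vni=4097 <name>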
func deleteInterfaceBySubnet(brPrefix string, s *subnet) error {
return nil
}
func deleteInterface(name string) error {
defer osl.InitOSContext()()
out, err := exec.Command("/usr/sbin/dladm", "delete-vxlan", name).Output()
if err != nil {
return fmt.Errorf("error creating vxlan interface: %v %s", err, out)
}
return nil
}

View file

@ -1,367 +0,0 @@
package overlay
//go:generate protoc -I.:../../Godeps/_workspace/src/github.com/gogo/protobuf --gogo_out=import_path=github.com/docker/libnetwork/drivers/overlay,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. overlay.proto
import (
"fmt"
"net"
"sync"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/discoverapi"
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/idm"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/osl"
"github.com/docker/libnetwork/types"
"github.com/hashicorp/serf/serf"
"github.com/sirupsen/logrus"
)
// XXX OVERLAY_SOLARIS
// Might need changes for names/constant values in solaris
const (
networkType = "overlay"
vethPrefix = "veth"
vethLen = 7
vxlanIDStart = 256
vxlanIDEnd = (1 << 24) - 1
vxlanPort = 4789
vxlanEncap = 50
secureOption = "encrypted"
)
var initVxlanIdm = make(chan (bool), 1)
type driver struct {
eventCh chan serf.Event
notifyCh chan ovNotify
exitCh chan chan struct{}
bindAddress string
advertiseAddress string
neighIP string
config map[string]interface{}
peerDb peerNetworkMap
secMap *encrMap
serfInstance *serf.Serf
networks networkTable
store datastore.DataStore
localStore datastore.DataStore
vxlanIdm *idm.Idm
once sync.Once
joinOnce sync.Once
keys []*key
sync.Mutex
}
// Init registers a new instance of overlay driver
func Init(dc driverapi.DriverCallback, config map[string]interface{}) error {
c := driverapi.Capability{
DataScope: datastore.GlobalScope,
ConnectivityScope: datastore.GlobalScope,
}
d := &driver{
networks: networkTable{},
peerDb: peerNetworkMap{
mp: map[string]*peerMap{},
},
secMap: &encrMap{nodes: map[string][]*spi{}},
config: config,
}
if data, ok := config[netlabel.GlobalKVClient]; ok {
var err error
dsc, ok := data.(discoverapi.DatastoreConfigData)
if !ok {
return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
}
d.store, err = datastore.NewDataStoreFromConfig(dsc)
if err != nil {
return types.InternalErrorf("failed to initialize data store: %v", err)
}
}
if data, ok := config[netlabel.LocalKVClient]; ok {
var err error
dsc, ok := data.(discoverapi.DatastoreConfigData)
if !ok {
return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
}
d.localStore, err = datastore.NewDataStoreFromConfig(dsc)
if err != nil {
return types.InternalErrorf("failed to initialize local data store: %v", err)
}
}
d.restoreEndpoints()
return dc.RegisterDriver(networkType, d, c)
}
// Endpoints are stored in the local store. Restore them and reconstruct the overlay sandbox
func (d *driver) restoreEndpoints() error {
if d.localStore == nil {
logrus.Warnf("Cannot restore overlay endpoints because local datastore is missing")
return nil
}
kvol, err := d.localStore.List(datastore.Key(overlayEndpointPrefix), &endpoint{})
if err != nil && err != datastore.ErrKeyNotFound {
return fmt.Errorf("failed to read overlay endpoint from store: %v", err)
}
if err == datastore.ErrKeyNotFound {
return nil
}
for _, kvo := range kvol {
ep := kvo.(*endpoint)
n := d.network(ep.nid)
if n == nil {
logrus.Debugf("Network (%s) not found for restored endpoint (%s)", ep.nid[0:7], ep.id[0:7])
logrus.Debugf("Deleting stale overlay endpoint (%s) from store", ep.id[0:7])
if err := d.deleteEndpointFromStore(ep); err != nil {
logrus.Debugf("Failed to delete stale overlay endpoint (%s) from store", ep.id[0:7])
}
continue
}
n.addEndpoint(ep)
s := n.getSubnetforIP(ep.addr)
if s == nil {
return fmt.Errorf("could not find subnet for endpoint %s", ep.id)
}
if err := n.joinSandbox(true); err != nil {
return fmt.Errorf("restore network sandbox failed: %v", err)
}
if err := n.joinSubnetSandbox(s, true); err != nil {
return fmt.Errorf("restore subnet sandbox failed for %q: %v", s.subnetIP.String(), err)
}
Ifaces := make(map[string][]osl.IfaceOption)
vethIfaceOption := make([]osl.IfaceOption, 0, 1)
vethIfaceOption = append(vethIfaceOption, n.sbox.InterfaceOptions().Master(s.brName))
Ifaces["veth+veth"] = vethIfaceOption
err := n.sbox.Restore(Ifaces, nil, nil, nil)
if err != nil {
return fmt.Errorf("failed to restore overlay sandbox: %v", err)
}
n.incEndpointCount()
d.peerDbAdd(ep.nid, ep.id, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), true)
}
return nil
}
// Fini cleans up the driver resources
func Fini(drv driverapi.Driver) {
d := drv.(*driver)
if d.exitCh != nil {
waitCh := make(chan struct{})
d.exitCh <- waitCh
<-waitCh
}
}
func (d *driver) configure() error {
if d.store == nil {
return nil
}
if d.vxlanIdm == nil {
return d.initializeVxlanIdm()
}
return nil
}
func (d *driver) initializeVxlanIdm() error {
var err error
initVxlanIdm <- true
defer func() { <-initVxlanIdm }()
if d.vxlanIdm != nil {
return nil
}
d.vxlanIdm, err = idm.New(d.store, "vxlan-id", vxlanIDStart, vxlanIDEnd)
if err != nil {
return fmt.Errorf("failed to initialize vxlan id manager: %v", err)
}
return nil
}
func (d *driver) Type() string {
return networkType
}
func (d *driver) IsBuiltIn() bool {
return true
}
func validateSelf(node string) error {
advIP := net.ParseIP(node)
if advIP == nil {
return fmt.Errorf("invalid self address (%s)", node)
}
addrs, err := net.InterfaceAddrs()
if err != nil {
return fmt.Errorf("Unable to get interface addresses %v", err)
}
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
if err == nil && ip.Equal(advIP) {
return nil
}
}
return fmt.Errorf("Multi-Host overlay networking requires cluster-advertise(%s) to be configured with a local ip-address that is reachable within the cluster", advIP.String())
}
func (d *driver) nodeJoin(advertiseAddress, bindAddress string, self bool) {
if self && !d.isSerfAlive() {
d.Lock()
d.advertiseAddress = advertiseAddress
d.bindAddress = bindAddress
d.Unlock()
// If there is no cluster store there is no need to start serf.
if d.store != nil {
if err := validateSelf(advertiseAddress); err != nil {
logrus.Warn(err.Error())
}
err := d.serfInit()
if err != nil {
logrus.Errorf("initializing serf instance failed: %v", err)
d.Lock()
d.advertiseAddress = ""
d.bindAddress = ""
d.Unlock()
return
}
}
}
d.Lock()
if !self {
d.neighIP = advertiseAddress
}
neighIP := d.neighIP
d.Unlock()
if d.serfInstance != nil && neighIP != "" {
var err error
d.joinOnce.Do(func() {
err = d.serfJoin(neighIP)
if err == nil {
d.pushLocalDb()
}
})
if err != nil {
logrus.Errorf("joining serf neighbor %s failed: %v", advertiseAddress, err)
d.Lock()
d.joinOnce = sync.Once{}
d.Unlock()
return
}
}
}
func (d *driver) pushLocalEndpointEvent(action, nid, eid string) {
n := d.network(nid)
if n == nil {
logrus.Debugf("Error pushing local endpoint event for network %s", nid)
return
}
ep := n.endpoint(eid)
if ep == nil {
logrus.Debugf("Error pushing local endpoint event for ep %s / %s", nid, eid)
return
}
if !d.isSerfAlive() {
return
}
d.notifyCh <- ovNotify{
action: "join",
nw: n,
ep: ep,
}
}
// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster
func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error {
var err error
switch dType {
case discoverapi.NodeDiscovery:
nodeData, ok := data.(discoverapi.NodeDiscoveryData)
if !ok || nodeData.Address == "" {
return fmt.Errorf("invalid discovery data")
}
d.nodeJoin(nodeData.Address, nodeData.BindAddress, nodeData.Self)
case discoverapi.DatastoreConfig:
if d.store != nil {
return types.ForbiddenErrorf("cannot accept datastore configuration: Overlay driver has a datastore configured already")
}
dsc, ok := data.(discoverapi.DatastoreConfigData)
if !ok {
return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
}
d.store, err = datastore.NewDataStoreFromConfig(dsc)
if err != nil {
return types.InternalErrorf("failed to initialize data store: %v", err)
}
case discoverapi.EncryptionKeysConfig:
encrData, ok := data.(discoverapi.DriverEncryptionConfig)
if !ok {
return fmt.Errorf("invalid encryption key notification data")
}
keys := make([]*key, 0, len(encrData.Keys))
for i := 0; i < len(encrData.Keys); i++ {
k := &key{
value: encrData.Keys[i],
tag: uint32(encrData.Tags[i]),
}
keys = append(keys, k)
}
d.setKeys(keys)
case discoverapi.EncryptionKeysUpdate:
var newKey, delKey, priKey *key
encrData, ok := data.(discoverapi.DriverEncryptionUpdate)
if !ok {
return fmt.Errorf("invalid encryption key notification data")
}
if encrData.Key != nil {
newKey = &key{
value: encrData.Key,
tag: uint32(encrData.Tag),
}
}
if encrData.Primary != nil {
priKey = &key{
value: encrData.Primary,
tag: uint32(encrData.PrimaryTag),
}
}
if encrData.Prune != nil {
delKey = &key{
value: encrData.Prune,
tag: uint32(encrData.PruneTag),
}
}
d.updateKeys(newKey, priKey, delKey)
default:
}
return nil
}
// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster
func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error {
return nil
}

View file

@ -1,468 +0,0 @@
// Code generated by protoc-gen-gogo.
// source: overlay.proto
// DO NOT EDIT!
/*
Package overlay is a generated protocol buffer package.
It is generated from these files:
overlay.proto
It has these top-level messages:
PeerRecord
*/
package overlay
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import strings "strings"
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
import sort "sort"
import strconv "strconv"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.GoGoProtoPackageIsVersion1
// PeerRecord defines the information corresponding to a peer
// container in the overlay network.
type PeerRecord struct {
// Endpoint IP is the IP of the container attachment on the
// given overlay network.
EndpointIP string `protobuf:"bytes,1,opt,name=endpoint_ip,json=endpointIp,proto3" json:"endpoint_ip,omitempty"`
// Endpoint MAC is the mac address of the container attachment
// on the given overlay network.
EndpointMAC string `protobuf:"bytes,2,opt,name=endpoint_mac,json=endpointMac,proto3" json:"endpoint_mac,omitempty"`
// Tunnel Endpoint IP defines the host IP for the host in
// which this container is running and can be reached by
// building a tunnel to that host IP.
TunnelEndpointIP string `protobuf:"bytes,3,opt,name=tunnel_endpoint_ip,json=tunnelEndpointIp,proto3" json:"tunnel_endpoint_ip,omitempty"`
}
func (m *PeerRecord) Reset() { *m = PeerRecord{} }
func (*PeerRecord) ProtoMessage() {}
func (*PeerRecord) Descriptor() ([]byte, []int) { return fileDescriptorOverlay, []int{0} }
func init() {
proto.RegisterType((*PeerRecord)(nil), "overlay.PeerRecord")
}
func (this *PeerRecord) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&overlay.PeerRecord{")
s = append(s, "EndpointIP: "+fmt.Sprintf("%#v", this.EndpointIP)+",\n")
s = append(s, "EndpointMAC: "+fmt.Sprintf("%#v", this.EndpointMAC)+",\n")
s = append(s, "TunnelEndpointIP: "+fmt.Sprintf("%#v", this.TunnelEndpointIP)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringOverlay(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func extensionToGoStringOverlay(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
if e == nil {
return "nil"
}
s := "map[int32]proto.Extension{"
keys := make([]int, 0, len(e))
for k := range e {
keys = append(keys, int(k))
}
sort.Ints(keys)
ss := []string{}
for _, k := range keys {
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
}
s += strings.Join(ss, ",") + "}"
return s
}
func (m *PeerRecord) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *PeerRecord) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.EndpointIP) > 0 {
data[i] = 0xa
i++
i = encodeVarintOverlay(data, i, uint64(len(m.EndpointIP)))
i += copy(data[i:], m.EndpointIP)
}
if len(m.EndpointMAC) > 0 {
data[i] = 0x12
i++
i = encodeVarintOverlay(data, i, uint64(len(m.EndpointMAC)))
i += copy(data[i:], m.EndpointMAC)
}
if len(m.TunnelEndpointIP) > 0 {
data[i] = 0x1a
i++
i = encodeVarintOverlay(data, i, uint64(len(m.TunnelEndpointIP)))
i += copy(data[i:], m.TunnelEndpointIP)
}
return i, nil
}
func encodeFixed64Overlay(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Overlay(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintOverlay(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return offset + 1
}
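// Worked example for encodeVarintOverlay: v = 300 (0b1_0010_1100) is written
// low 7 bits first with the continuation bit set:
//
//	first byte:  300&0x7f | 0x80 = 0x2c | 0x80 = 0xac
//	second byte: 300 >> 7        = 0x02
//
// i.e. the bytes 0xac 0x02, the standard protobuf base-128 varint for 300.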
func (m *PeerRecord) Size() (n int) {
var l int
_ = l
l = len(m.EndpointIP)
if l > 0 {
n += 1 + l + sovOverlay(uint64(l))
}
l = len(m.EndpointMAC)
if l > 0 {
n += 1 + l + sovOverlay(uint64(l))
}
l = len(m.TunnelEndpointIP)
if l > 0 {
n += 1 + l + sovOverlay(uint64(l))
}
return n
}
func sovOverlay(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozOverlay(x uint64) (n int) {
return sovOverlay(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *PeerRecord) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&PeerRecord{`,
`EndpointIP:` + fmt.Sprintf("%v", this.EndpointIP) + `,`,
`EndpointMAC:` + fmt.Sprintf("%v", this.EndpointMAC) + `,`,
`TunnelEndpointIP:` + fmt.Sprintf("%v", this.TunnelEndpointIP) + `,`,
`}`,
}, "")
return s
}
func valueToStringOverlay(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *PeerRecord) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOverlay
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PeerRecord: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PeerRecord: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EndpointIP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOverlay
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthOverlay
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.EndpointIP = string(data[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EndpointMAC", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOverlay
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthOverlay
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.EndpointMAC = string(data[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TunnelEndpointIP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOverlay
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthOverlay
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.TunnelEndpointIP = string(data[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipOverlay(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOverlay
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipOverlay(data []byte) (n int, err error) {
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowOverlay
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowOverlay
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowOverlay
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthOverlay
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowOverlay
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipOverlay(data[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthOverlay = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowOverlay = fmt.Errorf("proto: integer overflow")
)
var fileDescriptorOverlay = []byte{
// 195 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xcd, 0x2f, 0x4b, 0x2d,
0xca, 0x49, 0xac, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x87, 0x72, 0xa5, 0x44, 0xd2,
0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x69, 0x2b, 0x23, 0x17, 0x57, 0x40,
0x6a, 0x6a, 0x51, 0x50, 0x6a, 0x72, 0x7e, 0x51, 0x8a, 0x90, 0x3e, 0x17, 0x77, 0x6a, 0x5e, 0x4a,
0x41, 0x7e, 0x66, 0x5e, 0x49, 0x7c, 0x66, 0x81, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x13, 0xdf,
0xa3, 0x7b, 0xf2, 0x5c, 0xae, 0x50, 0x61, 0xcf, 0x80, 0x20, 0x2e, 0x98, 0x12, 0xcf, 0x02, 0x21,
0x23, 0x2e, 0x1e, 0xb8, 0x86, 0xdc, 0xc4, 0x64, 0x09, 0x26, 0xb0, 0x0e, 0x7e, 0xa0, 0x0e, 0x6e,
0x98, 0x0e, 0x5f, 0x47, 0xe7, 0x20, 0xb8, 0xa9, 0xbe, 0x89, 0xc9, 0x42, 0x4e, 0x5c, 0x42, 0x25,
0xa5, 0x79, 0x79, 0xa9, 0x39, 0xf1, 0xc8, 0x76, 0x31, 0x83, 0x75, 0x8a, 0x00, 0x75, 0x0a, 0x84,
0x80, 0x65, 0x91, 0x6c, 0x14, 0x28, 0x41, 0x15, 0x29, 0x70, 0x92, 0xb8, 0xf1, 0x50, 0x8e, 0xe1,
0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x80, 0xf8, 0x02, 0x10, 0x3f, 0x00, 0xe2,
0x24, 0x36, 0xb0, 0xc7, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xd7, 0x7d, 0x7d, 0x08,
0x01, 0x00, 0x00,
}

View file

@ -1,27 +0,0 @@
syntax = "proto3";
import "gogoproto/gogo.proto";
package overlay;
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.stringer_all) = true;
option (gogoproto.gostring_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.goproto_stringer_all) = false;
// PeerRecord defines the information corresponding to a peer
// container in the overlay network.
message PeerRecord {
// Endpoint IP is the IP of the container attachment on the
// given overlay network.
string endpoint_ip = 1 [(gogoproto.customname) = "EndpointIP"];
// Endpoint MAC is the mac address of the container attachment
// on the given overlay network.
string endpoint_mac = 2 [(gogoproto.customname) = "EndpointMAC"];
// Tunnel Endpoint IP defines the host IP for the host in
// which this container is running and can be reached by
// building a tunnel to that host IP.
string tunnel_endpoint_ip = 3 [(gogoproto.customname) = "TunnelEndpointIP"];
}

View file

@ -1,336 +0,0 @@
package overlay
import (
"fmt"
"net"
"sync"
"github.com/sirupsen/logrus"
)
const ovPeerTable = "overlay_peer_table"
type peerKey struct {
peerIP net.IP
peerMac net.HardwareAddr
}
type peerEntry struct {
eid string
vtep net.IP
peerIPMask net.IPMask
inSandbox bool
isLocal bool
}
type peerMap struct {
mp map[string]peerEntry
sync.Mutex
}
type peerNetworkMap struct {
mp map[string]*peerMap
sync.Mutex
}
func (pKey peerKey) String() string {
return fmt.Sprintf("%s %s", pKey.peerIP, pKey.peerMac)
}
func (pKey *peerKey) Scan(state fmt.ScanState, verb rune) error {
ipB, err := state.Token(true, nil)
if err != nil {
return err
}
pKey.peerIP = net.ParseIP(string(ipB))
macB, err := state.Token(true, nil)
if err != nil {
return err
}
pKey.peerMac, err = net.ParseMAC(string(macB))
if err != nil {
return err
}
return nil
}
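// Because peerKey implements both fmt.Stringer (String above) and
// fmt.Scanner (Scan above), entries round-trip through plain strings.
// A minimal sketch with made-up values:
//
//	var k peerKey
//	if _, err := fmt.Sscan("10.0.0.2 02:42:0a:00:00:02", &k); err == nil {
//		fmt.Println(k.peerIP, k.peerMac) // 10.0.0.2 02:42:0a:00:00:02
//	}
//
// This is how peerDbNetworkWalk below recovers the key from the map's string keys.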
var peerDbWg sync.WaitGroup
func (d *driver) peerDbWalk(f func(string, *peerKey, *peerEntry) bool) error {
d.peerDb.Lock()
nids := []string{}
for nid := range d.peerDb.mp {
nids = append(nids, nid)
}
d.peerDb.Unlock()
for _, nid := range nids {
d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
return f(nid, pKey, pEntry)
})
}
return nil
}
func (d *driver) peerDbNetworkWalk(nid string, f func(*peerKey, *peerEntry) bool) error {
d.peerDb.Lock()
pMap, ok := d.peerDb.mp[nid]
if !ok {
d.peerDb.Unlock()
return nil
}
d.peerDb.Unlock()
pMap.Lock()
for pKeyStr, pEntry := range pMap.mp {
var pKey peerKey
if _, err := fmt.Sscan(pKeyStr, &pKey); err != nil {
logrus.Warnf("Peer key scan on network %s failed: %v", nid, err)
}
if f(&pKey, &pEntry) {
pMap.Unlock()
return nil
}
}
pMap.Unlock()
return nil
}
func (d *driver) peerDbSearch(nid string, peerIP net.IP) (net.HardwareAddr, net.IPMask, net.IP, error) {
var (
peerMac net.HardwareAddr
vtep net.IP
peerIPMask net.IPMask
found bool
)
err := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
if pKey.peerIP.Equal(peerIP) {
peerMac = pKey.peerMac
peerIPMask = pEntry.peerIPMask
vtep = pEntry.vtep
found = true
return found
}
return found
})
if err != nil {
return nil, nil, nil, fmt.Errorf("peerdb search for peer ip %q failed: %v", peerIP, err)
}
if !found {
return nil, nil, nil, fmt.Errorf("peer ip %q not found in peerdb", peerIP)
}
return peerMac, peerIPMask, vtep, nil
}
func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
peerMac net.HardwareAddr, vtep net.IP, isLocal bool) {
peerDbWg.Wait()
d.peerDb.Lock()
pMap, ok := d.peerDb.mp[nid]
if !ok {
d.peerDb.mp[nid] = &peerMap{
mp: make(map[string]peerEntry),
}
pMap = d.peerDb.mp[nid]
}
d.peerDb.Unlock()
pKey := peerKey{
peerIP: peerIP,
peerMac: peerMac,
}
pEntry := peerEntry{
eid: eid,
vtep: vtep,
peerIPMask: peerIPMask,
isLocal: isLocal,
}
pMap.Lock()
pMap.mp[pKey.String()] = pEntry
pMap.Unlock()
}
func (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
peerMac net.HardwareAddr, vtep net.IP) {
peerDbWg.Wait()
d.peerDb.Lock()
pMap, ok := d.peerDb.mp[nid]
if !ok {
d.peerDb.Unlock()
return
}
d.peerDb.Unlock()
pKey := peerKey{
peerIP: peerIP,
peerMac: peerMac,
}
pMap.Lock()
delete(pMap.mp, pKey.String())
pMap.Unlock()
}
func (d *driver) peerDbUpdateSandbox(nid string) {
d.peerDb.Lock()
pMap, ok := d.peerDb.mp[nid]
if !ok {
d.peerDb.Unlock()
return
}
d.peerDb.Unlock()
peerDbWg.Add(1)
var peerOps []func()
pMap.Lock()
for pKeyStr, pEntry := range pMap.mp {
var pKey peerKey
if _, err := fmt.Sscan(pKeyStr, &pKey); err != nil {
fmt.Printf("peer key scan failed: %v", err)
}
if pEntry.isLocal {
continue
}
// Go captures variables by reference. The pEntry could be
// pointing to the same memory location for every iteration. Make
// a copy of pEntry before capturing it in the following closure.
entry := pEntry
op := func() {
if err := d.peerAdd(nid, entry.eid, pKey.peerIP, entry.peerIPMask,
pKey.peerMac, entry.vtep,
false); err != nil {
fmt.Printf("peerdbupdate in sandbox failed for ip %s and mac %s: %v",
pKey.peerIP, pKey.peerMac, err)
}
}
peerOps = append(peerOps, op)
}
pMap.Unlock()
for _, op := range peerOps {
op()
}
peerDbWg.Done()
}
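// The entry copy above works around the classic loop-variable capture
// pitfall: before Go 1.22 the range variable is reused across iterations,
// so closures that capture it all observe the last value. A minimal sketch
// of the bug being avoided:
//
//	funcs := []func(){}
//	for _, v := range []int{1, 2, 3} {
//		funcs = append(funcs, func() { fmt.Println(v) })
//	}
//	for _, f := range funcs {
//		f() // prints 3 3 3 without a per-iteration copy of v
//	}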
func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
peerMac net.HardwareAddr, vtep net.IP, updateDb bool) error {
if err := validateID(nid, eid); err != nil {
return err
}
if updateDb {
d.peerDbAdd(nid, eid, peerIP, peerIPMask, peerMac, vtep, false)
}
n := d.network(nid)
if n == nil {
return nil
}
sbox := n.sandbox()
if sbox == nil {
return nil
}
IP := &net.IPNet{
IP: peerIP,
Mask: peerIPMask,
}
s := n.getSubnetforIP(IP)
if s == nil {
return fmt.Errorf("couldn't find the subnet %q in network %q", IP.String(), n.id)
}
if err := n.obtainVxlanID(s); err != nil {
return fmt.Errorf("couldn't get vxlan id for %q: %v", s.subnetIP.String(), err)
}
if err := n.joinSubnetSandbox(s, false); err != nil {
return fmt.Errorf("subnet sandbox join failed for %q: %v", s.subnetIP.String(), err)
}
if err := d.checkEncryption(nid, vtep, n.vxlanID(s), false, true); err != nil {
logrus.Warn(err)
}
// Add neighbor entry for the peer IP
if err := sbox.AddNeighbor(peerIP, peerMac, false, sbox.NeighborOptions().LinkName(s.vxlanName)); err != nil {
return fmt.Errorf("could not add neigbor entry into the sandbox: %v", err)
}
// XXX Add fdb entry to the bridge for the peer mac
return nil
}
func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
peerMac net.HardwareAddr, vtep net.IP, updateDb bool) error {
if err := validateID(nid, eid); err != nil {
return err
}
if updateDb {
d.peerDbDelete(nid, eid, peerIP, peerIPMask, peerMac, vtep)
}
n := d.network(nid)
if n == nil {
return nil
}
sbox := n.sandbox()
if sbox == nil {
return nil
}
// Delete fdb entry to the bridge for the peer mac
if err := sbox.DeleteNeighbor(vtep, peerMac, true); err != nil {
return fmt.Errorf("could not delete fdb entry into the sandbox: %v", err)
}
// Delete neighbor entry for the peer IP
if err := sbox.DeleteNeighbor(peerIP, peerMac, true); err != nil {
return fmt.Errorf("could not delete neigbor entry into the sandbox: %v", err)
}
if err := d.checkEncryption(nid, vtep, 0, false, false); err != nil {
logrus.Warn(err)
}
return nil
}
func (d *driver) pushLocalDb() {
d.peerDbWalk(func(nid string, pKey *peerKey, pEntry *peerEntry) bool {
if pEntry.isLocal {
d.pushLocalEndpointEvent("join", nid, pEntry.eid)
}
return false
})
}

View file

@ -1,15 +0,0 @@
package libnetwork
import (
"github.com/docker/libnetwork/drivers/null"
"github.com/docker/libnetwork/drivers/solaris/bridge"
"github.com/docker/libnetwork/drivers/solaris/overlay"
)
func getInitializers(experimental bool) []initializer {
return []initializer{
{overlay.Init, "overlay"},
{bridge.Init, "bridge"},
{null.Init, "null"},
}
}

View file

@ -138,6 +138,15 @@ func (ec *endpointCnt) setCnt(cnt uint64) error {
}
func (ec *endpointCnt) atomicIncDecEpCnt(inc bool) error {
store := ec.n.getController().getStore(ec.DataScope())
if store == nil {
return fmt.Errorf("store not found for scope %s", ec.DataScope())
}
tmp := &endpointCnt{n: ec.n}
if err := store.GetObject(datastore.Key(ec.Key()...), tmp); err != nil {
return err
}
retry:
ec.Lock()
if inc {
@ -149,11 +158,6 @@ retry:
}
ec.Unlock()
store := ec.n.getController().getStore(ec.DataScope())
if store == nil {
return fmt.Errorf("store not found for scope %s", ec.DataScope())
}
if err := ec.n.getController().updateToStore(ec); err != nil {
if err == datastore.ErrKeyModified {
if err := store.GetObject(datastore.Key(ec.Key()...), ec); err != nil {

View file

@ -1,4 +1,4 @@
// +build linux freebsd solaris darwin
// +build linux freebsd darwin
package builtin

View file

@ -1,104 +0,0 @@
// +build solaris
package netutils
import (
"fmt"
"net"
"os/exec"
"strings"
"github.com/docker/libnetwork/ipamutils"
"github.com/vishvananda/netlink"
)
var (
networkGetRoutesFct func(netlink.Link, int) ([]netlink.Route, error)
)
// CheckRouteOverlaps checks whether the passed network overlaps with any existing routes
func CheckRouteOverlaps(toCheck *net.IPNet) error {
return nil
}
// ElectInterfaceAddresses looks for an interface on the OS with the specified name
// and returns all its IPv4 and IPv6 addresses in CIDR notation.
// If a failure in retrieving the addresses or no IPv4 address is found, an error is returned.
// If the interface does not exist, it chooses from a predefined
// list the first IPv4 address which does not conflict with other
// interfaces on the system.
func ElectInterfaceAddresses(name string) ([]*net.IPNet, []*net.IPNet, error) {
var (
v4Net *net.IPNet
)
out, err := exec.Command("/usr/sbin/ipadm", "show-addr",
"-p", "-o", "addrobj,addr").Output()
if err != nil {
fmt.Println("failed to list interfaces on system")
return nil, nil, err
}
alist := strings.Fields(string(out))
for _, a := range alist {
linkandaddr := strings.SplitN(a, ":", 2)
if len(linkandaddr) != 2 {
fmt.Println("failed to check interfaces on system: ", a)
continue
}
gw := fmt.Sprintf("%s_gw0", name)
link := strings.Split(linkandaddr[0], "/")[0]
addr := linkandaddr[1]
if gw != link {
continue
}
_, ipnet, err := net.ParseCIDR(addr)
if err != nil {
fmt.Println("failed to parse address: ", addr)
continue
}
v4Net = ipnet
break
}
if v4Net == nil {
v4Net, err = FindAvailableNetwork(ipamutils.PredefinedBroadNetworks)
if err != nil {
return nil, nil, err
}
}
return []*net.IPNet{v4Net}, nil, nil
}
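// The parsing above assumes `ipadm show-addr -p -o addrobj,addr` output of
// the form "<addrobj>:<CIDR>", one entry per field, for example (made-up
// values):
//
//	docker0_gw0/v4:10.0.125.1/24
//
// where the addrobj's link part is matched against "<name>_gw0".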
// FindAvailableNetwork returns a network from the passed list which does not
// overlap with existing interfaces in the system
func FindAvailableNetwork(list []*net.IPNet) (*net.IPNet, error) {
out, err := exec.Command("/usr/sbin/ipadm", "show-addr",
"-p", "-o", "addr").Output()
if err != nil {
fmt.Println("failed to list interfaces on system")
return nil, err
}
ipaddrs := strings.Fields(string(out))
inuse := []*net.IPNet{}
for _, ip := range ipaddrs {
_, ipnet, err := net.ParseCIDR(ip)
if err != nil {
fmt.Println("failed to check interfaces on system: ", ip)
continue
}
inuse = append(inuse, ipnet)
}
for _, avail := range list {
isAvail := true
for _, ipnet := range inuse {
if NetworkOverlaps(avail, ipnet) {
isAvail = false
break
}
}
if isAvail {
return avail, nil
}
}
return nil, fmt.Errorf("no available network")
}

View file

@ -17,15 +17,10 @@ import (
)
const (
// The garbage collection logic for entries leverages the presence of the network.
// For this reason the expiration time of the network is set slightly higher than the entry expiration so that
// there are at least 5 extra cycles to make sure that all the entries are properly deleted before deleting the network.
reapEntryInterval = 30 * time.Minute
reapNetworkInterval = reapEntryInterval + 5*reapPeriod
reapPeriod = 5 * time.Second
retryInterval = 1 * time.Second
nodeReapInterval = 24 * time.Hour
nodeReapPeriod = 2 * time.Hour
reapPeriod = 5 * time.Second
retryInterval = 1 * time.Second
nodeReapInterval = 24 * time.Hour
nodeReapPeriod = 2 * time.Hour
)
type logWriter struct{}
@ -260,13 +255,18 @@ func (nDB *NetworkDB) triggerFunc(stagger time.Duration, C <-chan time.Time, sto
func (nDB *NetworkDB) reapDeadNode() {
nDB.Lock()
defer nDB.Unlock()
for id, n := range nDB.failedNodes {
if n.reapTime > 0 {
n.reapTime -= nodeReapPeriod
continue
for _, nodeMap := range []map[string]*node{
nDB.failedNodes,
nDB.leftNodes,
} {
for id, n := range nodeMap {
if n.reapTime > nodeReapPeriod {
n.reapTime -= nodeReapPeriod
continue
}
logrus.Debugf("Garbage collect node %v", n.Name)
delete(nodeMap, id)
}
logrus.Debugf("Removing failed node %v from gossip cluster", n.Name)
delete(nDB.failedNodes, id)
}
}
@ -379,7 +379,6 @@ func (nDB *NetworkDB) gossip() {
thisNodeNetworks := nDB.networks[nDB.config.NodeID]
for nid := range thisNodeNetworks {
networkNodes[nid] = nDB.networkNodes[nid]
}
printStats := time.Since(nDB.lastStatsTimestamp) >= nDB.config.StatsPrintPeriod
printHealth := time.Since(nDB.lastHealthTimestamp) >= nDB.config.HealthPrintPeriod

View file

@ -16,9 +16,12 @@ func (d *delegate) NodeMeta(limit int) []byte {
return []byte{}
}
func (nDB *NetworkDB) getNode(nEvent *NodeEvent) *node {
nDB.Lock()
defer nDB.Unlock()
// getNode searches for the node in the tables
// returns true if the node was respectively in the active list, explicit node leave list or failed list
func (nDB *NetworkDB) getNode(nEvent *NodeEvent, extract bool) (bool, bool, bool, *node) {
var active bool
var left bool
var failed bool
for _, nodes := range []map[string]*node{
nDB.failedNodes,
@ -26,35 +29,19 @@ func (nDB *NetworkDB) getNode(nEvent *NodeEvent) *node {
nDB.nodes,
} {
if n, ok := nodes[nEvent.NodeName]; ok {
active = &nodes == &nDB.nodes
left = &nodes == &nDB.leftNodes
failed = &nodes == &nDB.failedNodes
if n.ltime >= nEvent.LTime {
return nil
return active, left, failed, nil
}
return n
if extract {
delete(nodes, n.Name)
}
return active, left, failed, n
}
}
return nil
}
func (nDB *NetworkDB) checkAndGetNode(nEvent *NodeEvent) *node {
nDB.Lock()
defer nDB.Unlock()
for _, nodes := range []map[string]*node{
nDB.failedNodes,
nDB.leftNodes,
nDB.nodes,
} {
if n, ok := nodes[nEvent.NodeName]; ok {
if n.ltime >= nEvent.LTime {
return nil
}
delete(nodes, n.Name)
return n
}
}
return nil
return active, left, failed, nil
}
func (nDB *NetworkDB) handleNodeEvent(nEvent *NodeEvent) bool {
@ -62,11 +49,14 @@ func (nDB *NetworkDB) handleNodeEvent(nEvent *NodeEvent) bool {
// time.
nDB.networkClock.Witness(nEvent.LTime)
n := nDB.getNode(nEvent)
nDB.RLock()
active, left, _, n := nDB.getNode(nEvent, false)
if n == nil {
nDB.RUnlock()
return false
}
// If its a node leave event for a manager and this is the only manager we
nDB.RUnlock()
// If it is a node leave event for a manager and this is the only manager we
// know of we want the reconnect logic to kick in. In a single manager
// cluster manager's gossip can't be bootstrapped unless some other node
// connects to it.
@ -79,28 +69,38 @@ func (nDB *NetworkDB) handleNodeEvent(nEvent *NodeEvent) bool {
}
}
n = nDB.checkAndGetNode(nEvent)
if n == nil {
return false
}
n.ltime = nEvent.LTime
switch nEvent.Type {
case NodeEventTypeJoin:
nDB.Lock()
_, found := nDB.nodes[n.Name]
nDB.nodes[n.Name] = n
nDB.Unlock()
if !found {
logrus.Infof("Node join event for %s/%s", n.Name, n.Addr)
if active {
// the node is already marked as active nothing to do
return false
}
nDB.Lock()
// Because the lock was released after the previous check, we have to look the node up again and re-verify its status
// All of this is to avoid a big lock on the function
if active, _, _, n = nDB.getNode(nEvent, true); !active && n != nil {
n.reapTime = 0
nDB.nodes[n.Name] = n
logrus.Infof("%v(%v): Node join event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr)
}
nDB.Unlock()
return true
case NodeEventTypeLeave:
if left {
// the node is already marked as left nothing to do.
return false
}
nDB.Lock()
nDB.leftNodes[n.Name] = n
// Because the lock was released after the previous check, we have to look the node up again and re-verify its status
// All of this is to avoid a big lock on the function
if _, left, _, n = nDB.getNode(nEvent, true); !left && n != nil {
n.reapTime = nodeReapInterval
nDB.leftNodes[n.Name] = n
logrus.Infof("%v(%v): Node leave event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr)
}
nDB.Unlock()
logrus.Infof("Node leave event for %s/%s", n.Name, n.Addr)
return true
}
@ -140,7 +140,7 @@ func (nDB *NetworkDB) handleNetworkEvent(nEvent *NetworkEvent) bool {
n.ltime = nEvent.LTime
n.leaving = nEvent.Type == NetworkEventTypeLeave
if n.leaving {
n.reapTime = reapNetworkInterval
n.reapTime = nDB.config.reapNetworkInterval
// The remote node is leaving the network, but not the gossip cluster.
// Mark all its entries in deleted state, this will guarantee that
@ -162,6 +162,12 @@ func (nDB *NetworkDB) handleNetworkEvent(nEvent *NetworkEvent) bool {
return false
}
// If the node is not known to memberlist we cannot save any state for it, because if it actually
// dies we won't receive any notification and we would remain stuck with it
if _, ok := nDB.nodes[nEvent.NodeName]; !ok {
return false
}
// This remote network join is being seen the first time.
nodeNetworks[nEvent.NetworkID] = &network{
id: nEvent.NetworkID,
@ -216,8 +222,9 @@ func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent) bool {
// This case can happen if the cluster is running different versions of the engine where the old version does not have the
// field. If that is not the case, this can be a BUG
if e.deleting && e.reapTime == 0 {
logrus.Warnf("handleTableEvent object %+v has a 0 reapTime, is the cluster running the same docker engine version?", tEvent)
e.reapTime = reapEntryInterval
logrus.Warnf("%v(%v) handleTableEvent object %+v has a 0 reapTime, is the cluster running the same docker engine version?",
nDB.config.Hostname, nDB.config.NodeID, tEvent)
e.reapTime = nDB.config.reapEntryInterval
}
nDB.Lock()
@ -229,7 +236,7 @@ func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent) bool {
// If the residual reapTime is lower or equal to 1/6 of the total reapTime don't bother broadcasting it around
// most likely the cluster is already aware of it, if not who will sync with this node will catch the state too.
// This also avoids that deletion of entries close to their garbage collection ends up circuling around forever
return e.reapTime > reapEntryInterval/6
return e.reapTime > nDB.config.reapEntryInterval/6
}
var op opType
@ -465,7 +472,7 @@ func (d *delegate) MergeRemoteState(buf []byte, isJoin bool) {
var gMsg GossipMessage
err := proto.Unmarshal(buf, &gMsg)
if err != nil {
logrus.Errorf("Error unmarshalling push pull messsage: %v", err)
logrus.Errorf("Error unmarshalling push pull message: %v", err)
return
}

View file

@ -21,10 +21,29 @@ func (e *eventDelegate) broadcastNodeEvent(addr net.IP, op opType) {
}
}
func (e *eventDelegate) purgeReincarnation(mn *memberlist.Node) {
for name, node := range e.nDB.failedNodes {
if node.Addr.Equal(mn.Addr) {
logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", mn.Name, mn.Addr, name, node.Addr)
delete(e.nDB.failedNodes, name)
return
}
}
for name, node := range e.nDB.leftNodes {
if node.Addr.Equal(mn.Addr) {
logrus.Infof("Node %s/%s, is the new incarnation of the shutdown node %s/%s", mn.Name, mn.Addr, name, node.Addr)
delete(e.nDB.leftNodes, name)
return
}
}
}
func (e *eventDelegate) NotifyJoin(mn *memberlist.Node) {
logrus.Infof("Node %s/%s, joined gossip cluster", mn.Name, mn.Addr)
e.broadcastNodeEvent(mn.Addr, opCreate)
e.nDB.Lock()
defer e.nDB.Unlock()
// In case the node is rejoining after a failure or leave,
// wait until an explicit join message arrives before adding
// it to the nodes just to make sure this is not a stale
@ -32,12 +51,15 @@ func (e *eventDelegate) NotifyJoin(mn *memberlist.Node) {
_, fOk := e.nDB.failedNodes[mn.Name]
_, lOk := e.nDB.leftNodes[mn.Name]
if fOk || lOk {
e.nDB.Unlock()
return
}
// Every node has a unique ID
// Check on the base of the IP address if the new node that joined is actually a new incarnation of a previous
// failed or shutdown one
e.purgeReincarnation(mn)
e.nDB.nodes[mn.Name] = &node{Node: *mn}
e.nDB.Unlock()
logrus.Infof("Node %s/%s, added to nodes list", mn.Name, mn.Addr)
}
@ -49,18 +71,28 @@ func (e *eventDelegate) NotifyLeave(mn *memberlist.Node) {
// If the node was temporary down, deleting the entries will guarantee that the CREATE events will be accepted
// If the node instead left because was going down, then it makes sense to just delete all its state
e.nDB.Lock()
defer e.nDB.Unlock()
e.nDB.deleteNetworkEntriesForNode(mn.Name)
e.nDB.deleteNodeTableEntries(mn.Name)
if n, ok := e.nDB.nodes[mn.Name]; ok {
delete(e.nDB.nodes, mn.Name)
// Check if a new incarnation of the same node already joined
// In that case this node can simply be removed and no further action are needed
for name, node := range e.nDB.nodes {
if node.Addr.Equal(mn.Addr) {
logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", name, node.Addr, mn.Name, mn.Addr)
return
}
}
// In case of node failure, keep retrying to reconnect every retryInterval (1sec) for nodeReapInterval (24h)
// Explicit leave will have already removed the node from the list of nodes (nDB.nodes) and put it into the leftNodes map
n.reapTime = nodeReapInterval
e.nDB.failedNodes[mn.Name] = n
failed = true
}
e.nDB.Unlock()
if failed {
logrus.Infof("Node %s/%s, added to failed nodes list", mn.Name, mn.Addr)
}

View file

@ -181,6 +181,13 @@ type Config struct {
// be able to increase this to get more content into each gossip packet.
PacketBufferSize int
// reapEntryInterval duration of a deleted entry before being garbage collected
reapEntryInterval time.Duration
// reapNetworkInterval duration of a deleted network before being garbage collected
// NOTE this MUST always be higher than reapEntryInterval
reapNetworkInterval time.Duration
// StatsPrintPeriod the period to use to print queue stats
// Default is 5min
StatsPrintPeriod time.Duration
@ -220,12 +227,18 @@ func DefaultConfig() *Config {
PacketBufferSize: 1400,
StatsPrintPeriod: 5 * time.Minute,
HealthPrintPeriod: 1 * time.Minute,
reapEntryInterval: 30 * time.Minute,
}
}
// New creates a new instance of NetworkDB using the Config passed by
// the caller.
func New(c *Config) (*NetworkDB, error) {
// The garbage collection logic for entries leverages the presence of the network.
// For this reason the expiration time of the network is set slightly higher than the entry expiration so that
// there are at least 5 extra cycles to make sure that all the entries are properly deleted before deleting the network.
c.reapNetworkInterval = c.reapEntryInterval + 5*reapPeriod
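// With the default reapEntryInterval of 30m and reapPeriod of 5s this yields
// 30m25s, i.e. five reaper cycles of slack after the last entry expires
// before the network itself can be garbage collected.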
nDB := &NetworkDB{
config: c,
indexes: make(map[int]*radix.Tree),
@ -241,7 +254,7 @@ func New(c *Config) (*NetworkDB, error) {
nDB.indexes[byTable] = radix.New()
nDB.indexes[byNetwork] = radix.New()
logrus.Debugf("New memberlist node - Node:%v will use memberlist nodeID:%v", c.Hostname, c.NodeID)
logrus.Infof("New memberlist node - Node:%v will use memberlist nodeID:%v with config:%+v", c.Hostname, c.NodeID, c)
if err := nDB.clusterInit(); err != nil {
return nil, err
}
@ -297,6 +310,10 @@ func (nDB *NetworkDB) Peers(nid string) []PeerInfo {
Name: node.Name,
IP: node.Addr.String(),
})
} else {
// Added for testing purposes; this condition should never happen, otherwise it means that the network list
// is out of sync with the node list
peers = append(peers, PeerInfo{})
}
}
return peers
@ -414,7 +431,7 @@ func (nDB *NetworkDB) DeleteEntry(tname, nid, key string) error {
node: nDB.config.NodeID,
value: value,
deleting: true,
reapTime: reapEntryInterval,
reapTime: nDB.config.reapEntryInterval,
}
if err := nDB.sendTableEvent(TableEventTypeDelete, nid, tname, key, entry); err != nil {
@ -487,7 +504,7 @@ func (nDB *NetworkDB) deleteNodeNetworkEntries(nid, node string) {
node: oldEntry.node,
value: oldEntry.value,
deleting: true,
reapTime: reapEntryInterval,
reapTime: nDB.config.reapEntryInterval,
}
// we arrived at this point in 2 cases:
@ -580,6 +597,9 @@ func (nDB *NetworkDB) JoinNetwork(nid string) error {
nodeNetworks[nid] = &network{id: nid, ltime: ltime, entriesNumber: entries}
nodeNetworks[nid].tableBroadcasts = &memberlist.TransmitLimitedQueue{
NumNodes: func() int {
//TODO fcrisciani this can be optimized maybe avoiding the lock?
// this callback is invoked on each GetBroadcasts call to evaluate the number of
// replicas for the message
nDB.RLock()
defer nDB.RUnlock()
return len(nDB.networkNodes[nid])
@ -635,7 +655,7 @@ func (nDB *NetworkDB) LeaveNetwork(nid string) error {
logrus.Debugf("%v(%v): leaving network %s", nDB.config.Hostname, nDB.config.NodeID, nid)
n.ltime = ltime
n.reapTime = reapNetworkInterval
n.reapTime = nDB.config.reapNetworkInterval
n.leaving = true
return nil
}

View file

@ -5,7 +5,9 @@ import (
"net/http"
"strings"
stackdump "github.com/docker/docker/pkg/signal"
"github.com/docker/libnetwork/diagnose"
"github.com/sirupsen/logrus"
)
const (
@ -24,6 +26,7 @@ var NetDbPaths2Func = map[string]diagnose.HTTPHandlerFunc{
"/deleteentry": dbDeleteEntry,
"/getentry": dbGetEntry,
"/gettable": dbGetTable,
"/dump": dbStackTrace,
}
func dbJoin(ctx interface{}, w http.ResponseWriter, r *http.Request) {
@ -240,3 +243,12 @@ func dbGetTable(ctx interface{}, w http.ResponseWriter, r *http.Request) {
}
}
}
func dbStackTrace(ctx interface{}, w http.ResponseWriter, r *http.Request) {
path, err := stackdump.DumpStacks("/tmp/")
if err != nil {
logrus.WithError(err).Error("failed to write goroutines dump")
} else {
fmt.Fprintf(w, "goroutine stacks written to %s", path)
}
}

View file

@ -1,4 +0,0 @@
package osl
// IfaceOption is a function option type to set interface options
type IfaceOption func()

View file

@ -1,4 +0,0 @@
package osl
// NeighOption is a function option type to set neighbor options
type NeighOption func()

View file

@ -1,24 +0,0 @@
package osl
// NewSandbox provides a new sandbox instance created in an os specific way
// provided a key which uniquely identifies the sandbox
func NewSandbox(key string, osCreate, isRestore bool) (Sandbox, error) {
return nil, nil
}
// GenerateKey generates a sandbox key based on the passed
// container id.
func GenerateKey(containerID string) string {
maxLen := 12
if len(containerID) < maxLen {
maxLen = len(containerID)
}
return containerID[:maxLen]
}
// InitOSContext initializes OS context while configuring network resources
func InitOSContext() func() {
return func() {}
}

View file

@ -1,4 +1,4 @@
// +build !linux,!windows,!freebsd,!solaris
// +build !linux,!windows,!freebsd
package osl

View file

@ -1,5 +0,0 @@
package portallocator
func getDynamicPortRange() (start int, end int, err error) {
return 32768, 65535, nil
}

View file

@ -10,7 +10,7 @@ import (
"time"
)
const userlandProxyCommandName = "docker-proxy"
var userlandProxyCommandName = "docker-proxy"
type userlandProxy interface {
Start() error

View file

@ -1,34 +0,0 @@
package portmapper
import (
"net"
"os/exec"
"strconv"
)
func newProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int, proxyPath string) (userlandProxy, error) {
path := proxyPath
if proxyPath == "" {
cmd, err := exec.LookPath(userlandProxyCommandName)
if err != nil {
return nil, err
}
path = cmd
}
args := []string{
path,
"-proto", proto,
"-host-ip", hostIP.String(),
"-host-port", strconv.Itoa(hostPort),
"-container-ip", containerIP.String(),
"-container-port", strconv.Itoa(containerPort),
}
return &proxyCommand{
cmd: &exec.Cmd{
Path: path,
Args: args,
},
}, nil
}
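// For illustration, a TCP mapping of host 0.0.0.0:8080 to container
// 172.17.0.2:80 (example addresses) yields the command line:
//
//	docker-proxy -proto tcp -host-ip 0.0.0.0 -host-port 8080 \
//	    -container-ip 172.17.0.2 -container-port 80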

View file

@ -1,45 +0,0 @@
// +build solaris
package libnetwork
import (
"io"
"net"
"github.com/docker/libnetwork/types"
)
// processSetKeyReexec is a private function that must be called only on an reexec path
// It expects 3 args { [0] = "libnetwork-setkey", [1] = <container-id>, [2] = <controller-id> }
// It also expects libcontainer.State as a json string in <stdin>
// Refer to https://github.com/opencontainers/runc/pull/160/ for more information
func processSetKeyReexec() {
}
// SetExternalKey provides a convenient way to set an External key to a sandbox
func SetExternalKey(controllerID string, containerID string, key string) error {
return types.NotImplementedErrorf("SetExternalKey isn't supported on non linux systems")
}
func sendKey(c net.Conn, data setKeyData) error {
return types.NotImplementedErrorf("sendKey isn't supported on non linux systems")
}
func processReturn(r io.Reader) error {
return types.NotImplementedErrorf("processReturn isn't supported on non linux systems")
}
// no-op on non linux systems
func (c *controller) startExternalKeyListener() error {
return nil
}
func (c *controller) acceptClientConnections(sock string, l net.Listener) {
}
func (c *controller) processExternalKey(conn net.Conn) error {
return types.NotImplementedErrorf("processExternalKey isn't supported on non linux systems")
}
func (c *controller) stopExternalKeyListener() {
}

View file

@ -256,6 +256,7 @@ retry:
if err := cs.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil {
return fmt.Errorf("could not update the kvobject to latest when trying to delete: %v", err)
}
logrus.Errorf("Error (%v) deleting object %v, retrying....", err, kvObject.Key())
goto retry
}
return err

View file

@ -1,44 +1,53 @@
github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/Microsoft/hcsshim v0.6.3
github.com/Microsoft/go-winio v0.4.5
github.com/Microsoft/hcsshim v0.6.5
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/boltdb/bolt c6ba97b89e0454fec9aa92e1d33a4e2c5fc1f631
github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
github.com/codegangsta/cli a65b733b303f0055f8d324d805f393cd3e7a7904
github.com/coreos/etcd 925d1d74cec8c3b169c52fd4b2dc234a35934fce
github.com/coreos/go-systemd b4a58d95188dd092ae20072bac14cece0e67c388
github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e
github.com/containerd/continuity 22694c680ee48fb8f50015b44618517e2bde77e8
github.com/coreos/etcd v3.2.1
github.com/coreos/go-semver v0.2.0
github.com/coreos/go-systemd v4
github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
github.com/docker/docker 2cac43e3573893cf8fd816e0ad5615426acb87f4 https://github.com/dmcgowan/docker.git
github.com/docker/docker a3efe9722f34af5cf4443fe3a5c4e4e3e0457b54
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/docker/go-units 8e2d4523730c73120e10d4652f36ad6010998f4e
github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
github.com/godbus/dbus 5f6efc7ef2759c81b7ba876593971bfce311eab3
github.com/gogo/protobuf 8d70fb3182befc465c4a1eac8ad4d38ff49778e2
github.com/golang/protobuf f7137ae6b19afbfd61a94b746fda3b3fe0491874
github.com/gorilla/context 215affda49addc4c8ef7e2534915df2c8c35c6cd
github.com/gorilla/mux 8096f47503459bcc74d1f4c487b7e6e42e5746b5
github.com/hashicorp/consul 954aec66231b79c161a4122b023fbcad13047f79
github.com/godbus/dbus v4.0.0
github.com/gogo/protobuf v0.4
github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4
github.com/gorilla/context v1.1
github.com/gorilla/mux v1.1
github.com/hashicorp/consul v0.5.2
github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
github.com/hashicorp/go-multierror 2167c8ec40776024589f483a6b836489e47e1049
github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
github.com/hashicorp/memberlist v0.1.0
github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d
github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
github.com/mattn/go-shellwords 525bedee691b5a8df547cb5cf9f86b7fb1883e24
github.com/miekg/dns d27455715200c7d3e321a1e5cadb27c9ee0b0f02
github.com/opencontainers/runc 8694d576ea3ce3c9e2c804b7f91b4e1e9a575d1c https://github.com/dmcgowan/runc.git
github.com/mattn/go-shellwords v1.0.3
github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7
github.com/mrunalp/fileutils ed869b029674c0e9ce4c0dfa781405c2d9946d08
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13
github.com/opencontainers/runc 0351df1c5a66838d0c392b4ac4cf9450de844e2d
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/selinux v1.0.0-rc1
github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
github.com/seccomp/libseccomp-golang 1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1
github.com/sirupsen/logrus v1.0.1
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
github.com/sirupsen/logrus v1.0.3
github.com/stretchr/testify dab07ac62d4905d3e48d17dc549c684ac3b7c15a
github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969
github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
golang.org/x/net c427ad74c6d7a814201695e9ffde0c5d400a7674
golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9