Vendoring latest swarmkit and libnetwork

Signed-off-by: Sandeep Bansal <msabansal@microsoft.com>

parent ed8ccc3046
commit 6e95165679
78 changed files with 6159 additions and 566 deletions
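Nearly all of the libnetwork hunks below make the same mechanical change: the aliased logrus import (`log "github.com/Sirupsen/logrus"`) is replaced by the plain package import, and call sites move from `log.*` to `logrus.*`. A minimal sketch of the resulting shape (hypothetical file, shown only to illustrate the pattern; it is not part of the vendored code):

```go
package example

// Before the change the package was imported under the alias "log":
//   log "github.com/Sirupsen/logrus"
//   log.Debugf("Option DefaultNetwork: %s", dn)
//
// After the change the package is used under its own name:
import (
	"github.com/Sirupsen/logrus"
)

func debugExample(dn string) {
	logrus.Debugf("Option DefaultNetwork: %s", dn)
}
```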

@@ -23,7 +23,7 @@ github.com/RackSec/srslog 365bf33cd9acc21ae1c355209865f17228ca534e
 github.com/imdario/mergo 0.2.1

 #get libnetwork packages
-github.com/docker/libnetwork a98901aebe7ce920b6fbf02ebe5c3afc9ca975b8
+github.com/docker/libnetwork 3ab699ea36573d98f481d233c30c742ade737565
 github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894
 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@@ -100,7 +100,7 @@ github.com/docker/containerd 8517738ba4b82aff5662c97ca4627e7e4d03b531
 github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4

 # cluster
-github.com/docker/swarmkit bddd3f0fb45491987d3dec5fb48311d289d21393
+github.com/docker/swarmkit ce07d9f69c9b4a1b1eb508e777c44eeacca87065
 github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
 github.com/gogo/protobuf v0.3
 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a

vendor/github.com/docker/libnetwork/bitseq/sequence.go (4 changes; generated, vendored)
@@ -9,7 +9,7 @@ import (
 "fmt"
 "sync"

-log "github.com/Sirupsen/logrus"
+"github.com/Sirupsen/logrus"
 "github.com/docker/libnetwork/datastore"
 "github.com/docker/libnetwork/types"
 )
@@ -286,7 +286,7 @@ func (h *Handle) CheckConsistency() error {
 continue
 }

-log.Infof("Fixed inconsistent bit sequence in datastore:\n%s\n%s", h, nh)
+logrus.Infof("Fixed inconsistent bit sequence in datastore:\n%s\n%s", h, nh)

 h.Lock()
 h.head = nh.head

vendor/github.com/docker/libnetwork/config/config.go (22 changes; generated, vendored)
@@ -6,7 +6,7 @@ import (
 "strings"

 "github.com/BurntSushi/toml"
-log "github.com/Sirupsen/logrus"
+"github.com/Sirupsen/logrus"
 "github.com/docker/docker/pkg/discovery"
 "github.com/docker/docker/pkg/plugingetter"
 "github.com/docker/go-connections/tlsconfig"
@@ -100,7 +100,7 @@ type Option func(c *Config)
 // OptionDefaultNetwork function returns an option setter for a default network
 func OptionDefaultNetwork(dn string) Option {
 return func(c *Config) {
-log.Debugf("Option DefaultNetwork: %s", dn)
+logrus.Debugf("Option DefaultNetwork: %s", dn)
 c.Daemon.DefaultNetwork = strings.TrimSpace(dn)
 }
 }
@@ -108,7 +108,7 @@ func OptionDefaultNetwork(dn string) Option {
 // OptionDefaultDriver function returns an option setter for default driver
 func OptionDefaultDriver(dd string) Option {
 return func(c *Config) {
-log.Debugf("Option DefaultDriver: %s", dd)
+logrus.Debugf("Option DefaultDriver: %s", dd)
 c.Daemon.DefaultDriver = strings.TrimSpace(dd)
 }
 }
@@ -134,7 +134,7 @@ func OptionLabels(labels []string) Option {
 // OptionKVProvider function returns an option setter for kvstore provider
 func OptionKVProvider(provider string) Option {
 return func(c *Config) {
-log.Debugf("Option OptionKVProvider: %s", provider)
+logrus.Debugf("Option OptionKVProvider: %s", provider)
 if _, ok := c.Scopes[datastore.GlobalScope]; !ok {
 c.Scopes[datastore.GlobalScope] = &datastore.ScopeCfg{}
 }
@@ -145,7 +145,7 @@ func OptionKVProvider(provider string) Option {
 // OptionKVProviderURL function returns an option setter for kvstore url
 func OptionKVProviderURL(url string) Option {
 return func(c *Config) {
-log.Debugf("Option OptionKVProviderURL: %s", url)
+logrus.Debugf("Option OptionKVProviderURL: %s", url)
 if _, ok := c.Scopes[datastore.GlobalScope]; !ok {
 c.Scopes[datastore.GlobalScope] = &datastore.ScopeCfg{}
 }
@@ -157,14 +157,14 @@ func OptionKVProviderURL(url string) Option {
 func OptionKVOpts(opts map[string]string) Option {
 return func(c *Config) {
 if opts["kv.cacertfile"] != "" && opts["kv.certfile"] != "" && opts["kv.keyfile"] != "" {
-log.Info("Option Initializing KV with TLS")
+logrus.Info("Option Initializing KV with TLS")
 tlsConfig, err := tlsconfig.Client(tlsconfig.Options{
 CAFile: opts["kv.cacertfile"],
 CertFile: opts["kv.certfile"],
 KeyFile: opts["kv.keyfile"],
 })
 if err != nil {
-log.Errorf("Unable to set up TLS: %s", err)
+logrus.Errorf("Unable to set up TLS: %s", err)
 return
 }
 if _, ok := c.Scopes[datastore.GlobalScope]; !ok {
@@ -182,7 +182,7 @@ func OptionKVOpts(opts map[string]string) Option {
 KeyFile: opts["kv.keyfile"],
 }
 } else {
-log.Info("Option Initializing KV without TLS")
+logrus.Info("Option Initializing KV without TLS")
 }
 }
 }
@@ -242,7 +242,7 @@ func ValidateName(name string) error {
 // OptionLocalKVProvider function returns an option setter for kvstore provider
 func OptionLocalKVProvider(provider string) Option {
 return func(c *Config) {
-log.Debugf("Option OptionLocalKVProvider: %s", provider)
+logrus.Debugf("Option OptionLocalKVProvider: %s", provider)
 if _, ok := c.Scopes[datastore.LocalScope]; !ok {
 c.Scopes[datastore.LocalScope] = &datastore.ScopeCfg{}
 }
@@ -253,7 +253,7 @@ func OptionLocalKVProvider(provider string) Option {
 // OptionLocalKVProviderURL function returns an option setter for kvstore url
 func OptionLocalKVProviderURL(url string) Option {
 return func(c *Config) {
-log.Debugf("Option OptionLocalKVProviderURL: %s", url)
+logrus.Debugf("Option OptionLocalKVProviderURL: %s", url)
 if _, ok := c.Scopes[datastore.LocalScope]; !ok {
 c.Scopes[datastore.LocalScope] = &datastore.ScopeCfg{}
 }
@@ -264,7 +264,7 @@ func OptionLocalKVProviderURL(url string) Option {
 // OptionLocalKVProviderConfig function returns an option setter for kvstore config
 func OptionLocalKVProviderConfig(config *store.Config) Option {
 return func(c *Config) {
-log.Debugf("Option OptionLocalKVProviderConfig: %v", config)
+logrus.Debugf("Option OptionLocalKVProviderConfig: %v", config)
 if _, ok := c.Scopes[datastore.LocalScope]; !ok {
 c.Scopes[datastore.LocalScope] = &datastore.ScopeCfg{}
 }

vendor/github.com/docker/libnetwork/controller.go (43 changes; generated, vendored)
@@ -51,7 +51,7 @@ import (
 "sync"
 "time"

-log "github.com/Sirupsen/logrus"
+"github.com/Sirupsen/logrus"
 "github.com/docker/docker/pkg/discovery"
 "github.com/docker/docker/pkg/locker"
 "github.com/docker/docker/pkg/plugingetter"
@@ -212,7 +212,7 @@ func New(cfgOptions ...config.Option) (NetworkController, error) {
 if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil {
 // Failing to initialize discovery is a bad situation to be in.
 // But it cannot fail creating the Controller
-log.Errorf("Failed to Initialize Discovery : %v", err)
+logrus.Errorf("Failed to Initialize Discovery : %v", err)
 }
 }

@@ -283,7 +283,7 @@ func (c *controller) SetKeys(keys []*types.EncryptionKey) error {
 if clusterConfigAvailable {
 return c.agentSetup()
 }
-log.Debugf("received encryption keys before cluster config")
+logrus.Debug("received encryption keys before cluster config")
 return nil
 }
 if agent == nil {
@@ -441,7 +441,7 @@ func (c *controller) ReloadConfiguration(cfgOptions ...config.Option) error {
 c.drvRegistry.WalkIPAMs(func(name string, driver ipamapi.Ipam, cap *ipamapi.Capability) bool {
 err := driver.DiscoverNew(discoverapi.DatastoreConfig, *dsConfig)
 if err != nil {
-log.Errorf("Failed to set datastore in driver %s: %v", name, err)
+logrus.Errorf("Failed to set datastore in driver %s: %v", name, err)
 }
 return false
 })
@@ -449,14 +449,14 @@ func (c *controller) ReloadConfiguration(cfgOptions ...config.Option) error {
 c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool {
 err := driver.DiscoverNew(discoverapi.DatastoreConfig, *dsConfig)
 if err != nil {
-log.Errorf("Failed to set datastore in driver %s: %v", name, err)
+logrus.Errorf("Failed to set datastore in driver %s: %v", name, err)
 }
 return false
 })

 if c.discovery == nil && c.cfg.Cluster.Watcher != nil {
 if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil {
-log.Errorf("Failed to Initialize Discovery after configuration update: %v", err)
+logrus.Errorf("Failed to Initialize Discovery after configuration update: %v", err)
 }
 }

@@ -561,7 +561,7 @@ func (c *controller) pushNodeDiscovery(d driverapi.Driver, cap driverapi.Capabil
 err = d.DiscoverDelete(discoverapi.NodeDiscovery, nodeData)
 }
 if err != nil {
-log.Debugf("discovery notification error : %v", err)
+logrus.Debugf("discovery notification error : %v", err)
 }
 }
 }
@@ -634,12 +634,13 @@ func (c *controller) NewNetwork(networkType, name string, id string, options ...
 id = stringid.GenerateRandomID()
 }

+defaultIpam := defaultIpamForNetworkType(networkType)
 // Construct the network object
 network := &network{
 name: name,
 networkType: networkType,
 generic: map[string]interface{}{netlabel.GenericData: make(map[string]string)},
-ipamType: ipamapi.DefaultIPAM,
+ipamType: defaultIpam,
 id: id,
 created: time.Now(),
 ctrlr: c,
@@ -686,7 +687,7 @@ func (c *controller) NewNetwork(networkType, name string, id string, options ...
 defer func() {
 if err != nil {
 if e := network.deleteNetwork(); e != nil {
-log.Warnf("couldn't roll back driver network on network %s creation failure: %v", network.name, err)
+logrus.Warnf("couldn't roll back driver network on network %s creation failure: %v", network.name, err)
 }
 }
 }()
@@ -701,7 +702,7 @@ func (c *controller) NewNetwork(networkType, name string, id string, options ...
 defer func() {
 if err != nil {
 if e := c.deleteFromStore(epCnt); e != nil {
-log.Warnf("couldnt rollback from store, epCnt %v on failure (%v): %v", epCnt, err, e)
+logrus.Warnf("could not rollback from store, epCnt %v on failure (%v): %v", epCnt, err, e)
 }
 }
 }()
@@ -722,7 +723,7 @@ func (c *controller) NewNetwork(networkType, name string, id string, options ...
 var joinCluster NetworkWalker = func(nw Network) bool {
 n := nw.(*network)
 if err := n.joinCluster(); err != nil {
-log.Errorf("Failed to join network %s (%s) into agent cluster: %v", n.Name(), n.ID(), err)
+logrus.Errorf("Failed to join network %s (%s) into agent cluster: %v", n.Name(), n.ID(), err)
 }
 n.addDriverWatches()
 return false
@@ -731,7 +732,7 @@ var joinCluster NetworkWalker = func(nw Network) bool {
 func (c *controller) reservePools() {
 networks, err := c.getNetworksForScope(datastore.LocalScope)
 if err != nil {
-log.Warnf("Could not retrieve networks from local store during ipam allocation for existing networks: %v", err)
+logrus.Warnf("Could not retrieve networks from local store during ipam allocation for existing networks: %v", err)
 return
 }

@@ -763,22 +764,22 @@ func (c *controller) reservePools() {
 }
 // Reserve pools
 if err := n.ipamAllocate(); err != nil {
-log.Warnf("Failed to allocate ipam pool(s) for network %q (%s): %v", n.Name(), n.ID(), err)
+logrus.Warnf("Failed to allocate ipam pool(s) for network %q (%s): %v", n.Name(), n.ID(), err)
 }
 // Reserve existing endpoints' addresses
 ipam, _, err := n.getController().getIPAMDriver(n.ipamType)
 if err != nil {
-log.Warnf("Failed to retrieve ipam driver for network %q (%s) during address reservation", n.Name(), n.ID())
+logrus.Warnf("Failed to retrieve ipam driver for network %q (%s) during address reservation", n.Name(), n.ID())
 continue
 }
 epl, err := n.getEndpointsFromStore()
 if err != nil {
-log.Warnf("Failed to retrieve list of current endpoints on network %q (%s)", n.Name(), n.ID())
+logrus.Warnf("Failed to retrieve list of current endpoints on network %q (%s)", n.Name(), n.ID())
 continue
 }
 for _, ep := range epl {
 if err := ep.assignAddress(ipam, true, ep.Iface().AddressIPv6() != nil); err != nil {
-log.Warnf("Failed to reserve current adress for endpoint %q (%s) on network %q (%s)",
+logrus.Warnf("Failed to reserve current adress for endpoint %q (%s) on network %q (%s)",
 ep.Name(), ep.ID(), n.Name(), n.ID())
 }
 }
@@ -788,7 +789,7 @@ func (c *controller) reservePools() {
 func doReplayPoolReserve(n *network) bool {
 _, caps, err := n.getController().getIPAMDriver(n.ipamType)
 if err != nil {
-log.Warnf("Failed to retrieve ipam driver for network %q (%s): %v", n.Name(), n.ID(), err)
+logrus.Warnf("Failed to retrieve ipam driver for network %q (%s): %v", n.Name(), n.ID(), err)
 return false
 }
 return caps.RequiresRequestReplay
@@ -815,7 +816,7 @@ func (c *controller) Networks() []Network {

 networks, err := c.getNetworksFromStore()
 if err != nil {
-log.Error(err)
+logrus.Error(err)
 }

 for _, n := range networks {
@@ -1132,18 +1133,18 @@ func (c *controller) clearIngress(clusterLeave bool) {

 if ingressSandbox != nil {
 if err := ingressSandbox.Delete(); err != nil {
-log.Warnf("Could not delete ingress sandbox while leaving: %v", err)
+logrus.Warnf("Could not delete ingress sandbox while leaving: %v", err)
 }
 }

 n, err := c.NetworkByName("ingress")
 if err != nil && clusterLeave {
-log.Warnf("Could not find ingress network while leaving: %v", err)
+logrus.Warnf("Could not find ingress network while leaving: %v", err)
 }

 if n != nil {
 if err := n.Delete(); err != nil {
-log.Warnf("Could not delete ingress network while leaving: %v", err)
+logrus.Warnf("Could not delete ingress network while leaving: %v", err)
 }
 }
 }

vendor/github.com/docker/libnetwork/datastore/datastore.go (4 changes; generated, vendored)
@@ -6,6 +6,7 @@ import (
 "reflect"
 "strings"
 "sync"
+"time"

 "github.com/docker/libkv"
 "github.com/docker/libkv/store"
@@ -134,7 +135,8 @@ func makeDefaultScopes() map[string]*ScopeCfg {
 Provider: string(store.BOLTDB),
 Address: defaultPrefix + "/local-kv.db",
 Config: &store.Config{
-Bucket: "libnetwork",
+Bucket:            "libnetwork",
+ConnectionTimeout: time.Minute,
 },
 },
 }

vendor/github.com/docker/libnetwork/default_gateway.go (24 changes; generated, vendored)
@@ -2,13 +2,14 @@ package libnetwork

 import (
 "fmt"
+"strings"

+"github.com/docker/libnetwork/netlabel"
 "github.com/docker/libnetwork/types"
 )

 const (
-libnGWNetwork = "docker_gwbridge"
-gwEPlen       = 12
+gwEPlen = 12
 )

 var procGwNetwork = make(chan (bool), 1)
@@ -52,6 +53,21 @@ func (sb *sandbox) setupDefaultGW() error {
 eplen = len(sb.containerID)
 }

+sbLabels := sb.Labels()
+
+if sbLabels[netlabel.PortMap] != nil {
+createOptions = append(createOptions, CreateOptionPortMapping(sbLabels[netlabel.PortMap].([]types.PortBinding)))
+}
+
+if sbLabels[netlabel.ExposedPorts] != nil {
+createOptions = append(createOptions, CreateOptionExposedPorts(sbLabels[netlabel.ExposedPorts].([]types.TransportPort)))
+}
+
+epOption := getPlatformOption()
+if epOption != nil {
+createOptions = append(createOptions, epOption)
+}
+
 newEp, err := n.CreateEndpoint("gateway_"+sb.containerID[0:eplen], createOptions...)
 if err != nil {
 return fmt.Errorf("container %s: endpoint create on GW Network failed: %v", sb.containerID, err)
@@ -119,7 +135,7 @@ func (sb *sandbox) needDefaultGW() bool {

 func (sb *sandbox) getEndpointInGWNetwork() *endpoint {
 for _, ep := range sb.getConnectedEndpoints() {
-if ep.getNetwork().name == libnGWNetwork {
+if ep.getNetwork().name == libnGWNetwork && strings.HasPrefix(ep.Name(), "gateway_") {
 return ep
 }
 }
@@ -127,7 +143,7 @@ func (sb *sandbox) getEndpointInGWNetwork() *endpoint {
 }

 func (ep *endpoint) endpointInGWNetwork() bool {
-if ep.getNetwork().name == libnGWNetwork {
+if ep.getNetwork().name == libnGWNetwork && strings.HasPrefix(ep.Name(), "gateway_") {
 return true
 }
 return false

vendor/github.com/docker/libnetwork/default_gateway_freebsd.go (6 changes; generated, vendored)
@@ -2,6 +2,12 @@ package libnetwork

 import "github.com/docker/libnetwork/types"

+const libnGWNetwork = "docker_gwbridge"
+
+func getPlatformOption() EndpointOption {
+return nil
+}
+
 func (c *controller) createGWNetwork() (Network, error) {
 return nil, types.NotImplementedErrorf("default gateway functionality is not implemented in freebsd")
 }

vendor/github.com/docker/libnetwork/default_gateway_linux.go (6 changes; generated, vendored)
@@ -7,6 +7,12 @@ import (
 "github.com/docker/libnetwork/drivers/bridge"
 )

+const libnGWNetwork = "docker_gwbridge"
+
+func getPlatformOption() EndpointOption {
+return nil
+}
+
 func (c *controller) createGWNetwork() (Network, error) {
 netOption := map[string]string{
 bridge.BridgeName: libnGWNetwork,

vendor/github.com/docker/libnetwork/default_gateway_solaris.go (29 changes; generated, vendored)
@@ -1,7 +1,32 @@
 package libnetwork

-import "github.com/docker/libnetwork/types"
+import (
+"fmt"
+"strconv"
+
+"github.com/docker/libnetwork/drivers/solaris/bridge"
+)
+
+const libnGWNetwork = "docker_gwbridge"
+
+func getPlatformOption() EndpointOption {
+return nil
+}

 func (c *controller) createGWNetwork() (Network, error) {
-return nil, types.NotImplementedErrorf("default gateway functionality is not implemented in solaris")
+netOption := map[string]string{
+bridge.BridgeName: libnGWNetwork,
+bridge.EnableICC: strconv.FormatBool(false),
+bridge.EnableIPMasquerade: strconv.FormatBool(true),
+}
+
+n, err := c.NewNetwork("bridge", libnGWNetwork, "",
+NetworkOptionDriverOpts(netOption),
+NetworkOptionEnableIPv6(false),
+)
+
+if err != nil {
+return nil, fmt.Errorf("error creating external connectivity network: %v", err)
+}
+return n, err
 }

vendor/github.com/docker/libnetwork/default_gateway_windows.go (17 changes; generated, vendored)
@@ -1,6 +1,21 @@
 package libnetwork

-import "github.com/docker/libnetwork/types"
+import (
+windriver "github.com/docker/libnetwork/drivers/windows"
+"github.com/docker/libnetwork/options"
+"github.com/docker/libnetwork/types"
+)
+
+const libnGWNetwork = "nat"
+
+func getPlatformOption() EndpointOption {
+
+epOption := options.Generic{
+windriver.DisableICC: true,
+windriver.DisableDNS: true,
+}
+return EndpointOptionGeneric(epOption)
+}

 func (c *controller) createGWNetwork() (Network, error) {
 return nil, types.NotImplementedErrorf("default gateway functionality is not implemented in windows")

vendor/github.com/docker/libnetwork/drivers/bridge/link.go (4 changes; generated, vendored)
@@ -4,7 +4,7 @@ import (
 "fmt"
 "net"

-log "github.com/Sirupsen/logrus"
+"github.com/Sirupsen/logrus"
 "github.com/docker/libnetwork/iptables"
 "github.com/docker/libnetwork/types"
 )
@@ -44,7 +44,7 @@ func (l *link) Disable() {
 // -D == iptables delete flag
 err := linkContainers("-D", l.parentIP, l.childIP, l.ports, l.bridge, true)
 if err != nil {
-log.Errorf("Error removing IPTables rules for a link %s due to %s", l.String(), err.Error())
+logrus.Errorf("Error removing IPTables rules for a link %s due to %s", l.String(), err.Error())
 }
 // Return proper error once we move to use a proper iptables package
 // that returns typed errors

vendor/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go (4 changes; generated, vendored)
@@ -6,7 +6,7 @@ import (
 "net"
 "path/filepath"

-log "github.com/Sirupsen/logrus"
+"github.com/Sirupsen/logrus"
 "github.com/docker/libnetwork/types"
 "github.com/vishvananda/netlink"
 )
@@ -39,7 +39,7 @@ func setupBridgeIPv4(config *networkConfiguration, i *bridgeInterface) error {
 return fmt.Errorf("failed to remove current ip address from bridge: %v", err)
 }
 }
-log.Debugf("Assigning address to bridge interface %s: %s", config.BridgeName, config.AddressIPv4)
+logrus.Debugf("Assigning address to bridge interface %s: %s", config.BridgeName, config.AddressIPv4)
 if err := i.nlh.AddrAdd(i.Link, &netlink.Addr{IPNet: config.AddressIPv4}); err != nil {
 return &IPv4AddrAddError{IP: config.AddressIPv4, Err: err}
 }

vendor/github.com/docker/libnetwork/drivers/bridge/setup_verify.go (4 changes; generated, vendored)
@@ -4,7 +4,7 @@ import (
 "fmt"
 "strings"

-log "github.com/Sirupsen/logrus"
+"github.com/Sirupsen/logrus"
 "github.com/docker/libnetwork/ns"
 "github.com/docker/libnetwork/types"
 "github.com/vishvananda/netlink"
@@ -39,7 +39,7 @@ func setupVerifyAndReconcile(config *networkConfiguration, i *bridgeInterface) e
 for _, addrv6 := range addrsv6 {
 if addrv6.IP.IsGlobalUnicast() && !types.CompareIPNet(addrv6.IPNet, i.bridgeIPv6) {
 if err := i.nlh.AddrDel(i.Link, &addrv6); err != nil {
-log.Warnf("Failed to remove residual IPv6 address %s from bridge: %v", addrv6.IPNet, err)
+logrus.Warnf("Failed to remove residual IPv6 address %s from bridge: %v", addrv6.IPNet, err)
 }
 }
 }

vendor/github.com/docker/libnetwork/drivers/overlay/encryption.go (60 changes; generated, vendored)
@@ -12,7 +12,7 @@ import (

 "strconv"

-log "github.com/Sirupsen/logrus"
+"github.com/Sirupsen/logrus"
 "github.com/docker/libnetwork/iptables"
 "github.com/docker/libnetwork/ns"
 "github.com/docker/libnetwork/types"
@@ -77,7 +77,7 @@ func (e *encrMap) String() string {
 }

 func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal, add bool) error {
-log.Debugf("checkEncryption(%s, %v, %d, %t)", nid[0:7], rIP, vxlanID, isLocal)
+logrus.Debugf("checkEncryption(%s, %v, %d, %t)", nid[0:7], rIP, vxlanID, isLocal)

 n := d.network(nid)
 if n == nil || !n.secure {
@@ -100,7 +100,7 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal
 }
 return false
 }); err != nil {
-log.Warnf("Failed to retrieve list of participating nodes in overlay network %s: %v", nid[0:5], err)
+logrus.Warnf("Failed to retrieve list of participating nodes in overlay network %s: %v", nid[0:5], err)
 }
 default:
 if len(d.network(nid).endpoints) > 0 {
@@ -108,18 +108,18 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal
 }
 }

-log.Debugf("List of nodes: %s", nodes)
+logrus.Debugf("List of nodes: %s", nodes)

 if add {
 for _, rIP := range nodes {
 if err := setupEncryption(lIP, aIP, rIP, vxlanID, d.secMap, d.keys); err != nil {
-log.Warnf("Failed to program network encryption between %s and %s: %v", lIP, rIP, err)
+logrus.Warnf("Failed to program network encryption between %s and %s: %v", lIP, rIP, err)
 }
 }
 } else {
 if len(nodes) == 0 {
 if err := removeEncryption(lIP, rIP, d.secMap); err != nil {
-log.Warnf("Failed to remove network encryption between %s and %s: %v", lIP, rIP, err)
+logrus.Warnf("Failed to remove network encryption between %s and %s: %v", lIP, rIP, err)
 }
 }
 }
@@ -128,14 +128,14 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal
 }

 func setupEncryption(localIP, advIP, remoteIP net.IP, vni uint32, em *encrMap, keys []*key) error {
-log.Debugf("Programming encryption for vxlan %d between %s and %s", vni, localIP, remoteIP)
+logrus.Debugf("Programming encryption for vxlan %d between %s and %s", vni, localIP, remoteIP)
 rIPs := remoteIP.String()

 indices := make([]*spi, 0, len(keys))

 err := programMangle(vni, true)
 if err != nil {
-log.Warn(err)
+logrus.Warn(err)
 }

 for i, k := range keys {
@@ -146,7 +146,7 @@ func setupEncryption(localIP, advIP, remoteIP net.IP, vni uint32, em *encrMap, k
 }
 fSA, rSA, err := programSA(localIP, remoteIP, spis, k, dir, true)
 if err != nil {
-log.Warn(err)
+logrus.Warn(err)
 }
 indices = append(indices, spis)
 if i != 0 {
@@ -154,7 +154,7 @@ func setupEncryption(localIP, advIP, remoteIP net.IP, vni uint32, em *encrMap, k
 }
 err = programSP(fSA, rSA, true)
 if err != nil {
-log.Warn(err)
+logrus.Warn(err)
 }
 }

@@ -179,14 +179,14 @@ func removeEncryption(localIP, remoteIP net.IP, em *encrMap) error {
 }
 fSA, rSA, err := programSA(localIP, remoteIP, idxs, nil, dir, false)
 if err != nil {
-log.Warn(err)
+logrus.Warn(err)
 }
 if i != 0 {
 continue
 }
 err = programSP(fSA, rSA, false)
 if err != nil {
-log.Warn(err)
+logrus.Warn(err)
 }
 }
 return nil
@@ -213,7 +213,7 @@ func programMangle(vni uint32, add bool) (err error) {
 }

 if err = iptables.RawCombinedOutput(append([]string{"-t", string(iptables.Mangle), a, chain}, rule...)...); err != nil {
-log.Warnf("could not %s mangle rule: %v", action, err)
+logrus.Warnf("could not %s mangle rule: %v", action, err)
 }

 return
@@ -248,9 +248,9 @@ func programSA(localIP, remoteIP net.IP, spi *spi, k *key, dir int, add bool) (f
 }

 if add != exists {
-log.Debugf("%s: rSA{%s}", action, rSA)
+logrus.Debugf("%s: rSA{%s}", action, rSA)
 if err := xfrmProgram(rSA); err != nil {
-log.Warnf("Failed %s rSA{%s}: %v", action, rSA, err)
+logrus.Warnf("Failed %s rSA{%s}: %v", action, rSA, err)
 }
 }
 }
@@ -273,9 +273,9 @@ func programSA(localIP, remoteIP net.IP, spi *spi, k *key, dir int, add bool) (f
 }

 if add != exists {
-log.Debugf("%s fSA{%s}", action, fSA)
+logrus.Debugf("%s fSA{%s}", action, fSA)
 if err := xfrmProgram(fSA); err != nil {
-log.Warnf("Failed %s fSA{%s}: %v.", action, fSA, err)
+logrus.Warnf("Failed %s fSA{%s}: %v.", action, fSA, err)
 }
 }
 }
@@ -319,9 +319,9 @@ func programSP(fSA *netlink.XfrmState, rSA *netlink.XfrmState, add bool) error {
 }

 if add != exists {
-log.Debugf("%s fSP{%s}", action, fPol)
+logrus.Debugf("%s fSP{%s}", action, fPol)
 if err := xfrmProgram(fPol); err != nil {
-log.Warnf("%s fSP{%s}: %v", action, fPol, err)
+logrus.Warnf("%s fSP{%s}: %v", action, fPol, err)
 }
 }

@@ -337,7 +337,7 @@ func saExists(sa *netlink.XfrmState) (bool, error) {
 return false, nil
 default:
 err = fmt.Errorf("Error while checking for SA existence: %v", err)
-log.Warn(err)
+logrus.Warn(err)
 return false, err
 }
 }
@@ -351,7 +351,7 @@ func spExists(sp *netlink.XfrmPolicy) (bool, error) {
 return false, nil
 default:
 err = fmt.Errorf("Error while checking for SP existence: %v", err)
-log.Warn(err)
+logrus.Warn(err)
 return false, err
 }
 }
@@ -397,16 +397,16 @@ func (d *driver) setKeys(keys []*key) error {
 d.keys = keys
 d.secMap = &encrMap{nodes: map[string][]*spi{}}
 d.Unlock()
-log.Debugf("Initial encryption keys: %v", d.keys)
+logrus.Debugf("Initial encryption keys: %v", d.keys)
 return nil
 }

 // updateKeys allows to add a new key and/or change the primary key and/or prune an existing key
 // The primary key is the key used in transmission and will go in first position in the list.
 func (d *driver) updateKeys(newKey, primary, pruneKey *key) error {
-log.Debugf("Updating Keys. New: %v, Primary: %v, Pruned: %v", newKey, primary, pruneKey)
+logrus.Debugf("Updating Keys. New: %v, Primary: %v, Pruned: %v", newKey, primary, pruneKey)

-log.Debugf("Current: %v", d.keys)
+logrus.Debugf("Current: %v", d.keys)

 var (
 newIdx = -1
@@ -459,7 +459,7 @@ func (d *driver) updateKeys(newKey, primary, pruneKey *key) error {
 }
 d.Unlock()

-log.Debugf("Updated: %v", d.keys)
+logrus.Debugf("Updated: %v", d.keys)

 return nil
 }
@@ -472,10 +472,10 @@ func (d *driver) updateKeys(newKey, primary, pruneKey *key) error {

 // Spis and keys are sorted in such away the one in position 0 is the primary
 func updateNodeKey(lIP, rIP net.IP, idxs []*spi, curKeys []*key, newIdx, priIdx, delIdx int) []*spi {
-log.Debugf("Updating keys for node: %s (%d,%d,%d)", rIP, newIdx, priIdx, delIdx)
+logrus.Debugf("Updating keys for node: %s (%d,%d,%d)", rIP, newIdx, priIdx, delIdx)

 spis := idxs
-log.Debugf("Current: %v", spis)
+logrus.Debugf("Current: %v", spis)

 // add new
 if newIdx != -1 {
@@ -520,9 +520,9 @@ func updateNodeKey(lIP, rIP net.IP, idxs []*spi, curKeys []*key, newIdx, priIdx,
 },
 },
 }
-log.Debugf("Updating fSP{%s}", fSP1)
+logrus.Debugf("Updating fSP{%s}", fSP1)
 if err := ns.NlHandle().XfrmPolicyUpdate(fSP1); err != nil {
-log.Warnf("Failed to update fSP{%s}: %v", fSP1, err)
+logrus.Warnf("Failed to update fSP{%s}: %v", fSP1, err)
 }

 // -fSA1
@@ -543,7 +543,7 @@ func updateNodeKey(lIP, rIP net.IP, idxs []*spi, curKeys []*key, newIdx, priIdx,
 spis = append(spis[:delIdx], spis[delIdx+1:]...)
 }

-log.Debugf("Updated: %v", spis)
+logrus.Debugf("Updated: %v", spis)

 return spis
 }

vendor/github.com/docker/libnetwork/drivers/overlay/joinleave.go (20 changes; generated, vendored)
@@ -5,7 +5,7 @@ import (
 "net"
 "syscall"

-log "github.com/Sirupsen/logrus"
+"github.com/Sirupsen/logrus"
 "github.com/docker/libnetwork/driverapi"
 "github.com/docker/libnetwork/ns"
 "github.com/docker/libnetwork/types"
@@ -109,7 +109,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
 continue
 }
 if err := jinfo.AddStaticRoute(sub.subnetIP, types.NEXTHOP, s.gwIP.IP); err != nil {
-log.Errorf("Adding subnet %s static route in network %q failed\n", s.subnetIP, n.id)
+logrus.Errorf("Adding subnet %s static route in network %q failed\n", s.subnetIP, n.id)
 }
 }

@@ -124,7 +124,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
 net.ParseIP(d.advertiseAddress), true)

 if err := d.checkEncryption(nid, nil, n.vxlanID(s), true, true); err != nil {
-log.Warn(err)
+logrus.Warn(err)
 }

 buf, err := proto.Marshal(&PeerRecord{
@@ -137,7 +137,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
 }

 if err := jinfo.AddTableEntry(ovPeerTable, eid, buf); err != nil {
-log.Errorf("overlay: Failed adding table entry to joininfo: %v", err)
+logrus.Errorf("overlay: Failed adding table entry to joininfo: %v", err)
 }

 d.pushLocalEndpointEvent("join", nid, eid)
@@ -147,7 +147,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,

 func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) {
 if tableName != ovPeerTable {
-log.Errorf("Unexpected table notification for table %s received", tableName)
+logrus.Errorf("Unexpected table notification for table %s received", tableName)
 return
 }

@@ -155,7 +155,7 @@ func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key stri

 var peer PeerRecord
 if err := proto.Unmarshal(value, &peer); err != nil {
-log.Errorf("Failed to unmarshal peer record: %v", err)
+logrus.Errorf("Failed to unmarshal peer record: %v", err)
 return
 }

@@ -167,19 +167,19 @@ func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key stri

 addr, err := types.ParseCIDR(peer.EndpointIP)
 if err != nil {
-log.Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP)
+logrus.Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP)
 return
 }

 mac, err := net.ParseMAC(peer.EndpointMAC)
 if err != nil {
-log.Errorf("Invalid mac %s received in event notify", peer.EndpointMAC)
+logrus.Errorf("Invalid mac %s received in event notify", peer.EndpointMAC)
 return
 }

 vtep := net.ParseIP(peer.TunnelEndpointIP)
 if vtep == nil {
-log.Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP)
+logrus.Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP)
 return
 }

@@ -219,7 +219,7 @@ func (d *driver) Leave(nid, eid string) error {
 n.leaveSandbox()

 if err := d.checkEncryption(nid, nil, 0, true, false); err != nil {
-log.Warn(err)
+logrus.Warn(err)
 }

 return nil

vendor/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go (8 changes; generated, vendored)
@@ -5,7 +5,7 @@ import (
 "fmt"
 "net"

-log "github.com/Sirupsen/logrus"
+"github.com/Sirupsen/logrus"
 "github.com/docker/libnetwork/datastore"
 "github.com/docker/libnetwork/driverapi"
 "github.com/docker/libnetwork/netutils"
@@ -116,7 +116,7 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {
 n.deleteEndpoint(eid)

 if err := d.deleteEndpointFromStore(ep); err != nil {
-log.Warnf("Failed to delete overlay endpoint %s from local store: %v", ep.id[0:7], err)
+logrus.Warnf("Failed to delete overlay endpoint %s from local store: %v", ep.id[0:7], err)
 }

 if ep.ifName == "" {
@@ -125,11 +125,11 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {

 link, err := nlh.LinkByName(ep.ifName)
 if err != nil {
-log.Debugf("Failed to retrieve interface (%s)'s link on endpoint (%s) delete: %v", ep.ifName, ep.id, err)
+logrus.Debugf("Failed to retrieve interface (%s)'s link on endpoint (%s) delete: %v", ep.ifName, ep.id, err)
 return nil
 }
 if err := nlh.LinkDel(link); err != nil {
-log.Debugf("Failed to delete interface (%s)'s link on endpoint (%s) delete: %v", ep.ifName, ep.id, err)
+logrus.Debugf("Failed to delete interface (%s)'s link on endpoint (%s) delete: %v", ep.ifName, ep.id, err)
 }

 return nil

vendor/github.com/docker/libnetwork/drivers/overlay/overlay.go (2 changes; generated, vendored)
@@ -111,7 +111,7 @@ func Init(dc driverapi.DriverCallback, config map[string]interface{}) error {
 // Endpoints are stored in the local store. Restore them and reconstruct the overlay sandbox
 func (d *driver) restoreEndpoints() error {
 if d.localStore == nil {
-logrus.Warnf("Cannot restore overlay endpoints because local datastore is missing")
+logrus.Warn("Cannot restore overlay endpoints because local datastore is missing")
 return nil
 }
 kvol, err := d.localStore.List(datastore.Key(overlayEndpointPrefix), &endpoint{})

vendor/github.com/docker/libnetwork/drivers/overlay/ovmanager/ovmanager.go (6 changes; generated, vendored)
@@ -18,7 +18,7 @@ import (

 const (
 networkType = "overlay"
-vxlanIDStart = 256
+vxlanIDStart = 4096
 vxlanIDEnd = (1 << 24) - 1
 )

@@ -57,7 +57,7 @@ func Init(dc driverapi.DriverCallback, config map[string]interface{}) error {
 config: config,
 }

-d.vxlanIdm, err = idm.New(nil, "vxlan-id", vxlanIDStart, vxlanIDEnd)
+d.vxlanIdm, err = idm.New(nil, "vxlan-id", 1, vxlanIDEnd)
 if err != nil {
 return fmt.Errorf("failed to initialize vxlan id manager: %v", err)
 }
@@ -164,7 +164,7 @@ func (n *network) obtainVxlanID(s *subnet) error {
 n.Unlock()

 if vni == 0 {
-vni, err = n.driver.vxlanIdm.GetID()
+vni, err = n.driver.vxlanIdm.GetIDInRange(vxlanIDStart, vxlanIDEnd)
 if err != nil {
 return err
 }
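The ovmanager hunks above seed the ID allocator at 1 but take automatically assigned VXLAN IDs only from the 4096 to (2^24 − 1) range, leaving lower VNIs available for explicit assignment. A self-contained toy sketch of that allocation policy (illustrative only; it does not use libnetwork's idm package):

```go
package main

import (
	"errors"
	"fmt"
)

// rangedAllocator hands out automatic IDs only from [autoStart, end], while
// still tracking explicitly reserved IDs below autoStart. This mirrors the
// policy in the diff above (allocator seeded at 1, automatic IDs >= 4096);
// it is a simplified stand-in, not libnetwork's idm implementation.
type rangedAllocator struct {
	autoStart, end uint32
	used           map[uint32]bool
}

func newRangedAllocator(autoStart, end uint32) *rangedAllocator {
	return &rangedAllocator{autoStart: autoStart, end: end, used: map[uint32]bool{}}
}

// Reserve marks an explicitly requested ID (it may be below autoStart).
func (a *rangedAllocator) Reserve(id uint32) error {
	if a.used[id] {
		return fmt.Errorf("id %d already in use", id)
	}
	a.used[id] = true
	return nil
}

// Next returns the lowest free ID in the automatic range.
func (a *rangedAllocator) Next() (uint32, error) {
	for id := a.autoStart; id <= a.end; id++ {
		if !a.used[id] {
			a.used[id] = true
			return id, nil
		}
	}
	return 0, errors.New("no vxlan id available in automatic range")
}

func main() {
	const vxlanIDStart, vxlanIDEnd = 4096, (1 << 24) - 1
	a := newRangedAllocator(vxlanIDStart, vxlanIDEnd)
	_ = a.Reserve(300)  // user-supplied VNI below the automatic range
	vni, _ := a.Next()  // automatic VNI, starts at 4096
	fmt.Println(vni)
}
```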

vendor/github.com/docker/libnetwork/drivers/overlay/peerdb.go (8 changes; generated, vendored)
@@ -6,7 +6,7 @@ import (
 "sync"
 "syscall"

-log "github.com/Sirupsen/logrus"
+"github.com/Sirupsen/logrus"
 )

 const ovPeerTable = "overlay_peer_table"
@@ -90,7 +90,7 @@ func (d *driver) peerDbNetworkWalk(nid string, f func(*peerKey, *peerEntry) bool
 for pKeyStr, pEntry := range pMap.mp {
 var pKey peerKey
 if _, err := fmt.Sscan(pKeyStr, &pKey); err != nil {
-log.Warnf("Peer key scan on network %s failed: %v", nid, err)
+logrus.Warnf("Peer key scan on network %s failed: %v", nid, err)
 }

 if f(&pKey, &pEntry) {
@@ -289,7 +289,7 @@ func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
 }

 if err := d.checkEncryption(nid, vtep, n.vxlanID(s), false, true); err != nil {
-log.Warn(err)
+logrus.Warn(err)
 }

 // Add neighbor entry for the peer IP
@@ -349,7 +349,7 @@ func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMas
 }

 if err := d.checkEncryption(nid, vtep, 0, false, false); err != nil {
-log.Warn(err)
+logrus.Warn(err)
 }

 return nil

vendor/github.com/docker/libnetwork/drivers/remote/driver.go (6 changes; generated, vendored)
@@ -4,7 +4,7 @@ import (
 "fmt"
 "net"

-log "github.com/Sirupsen/logrus"
+"github.com/Sirupsen/logrus"
 "github.com/docker/docker/pkg/plugins"
 "github.com/docker/libnetwork/datastore"
 "github.com/docker/libnetwork/discoverapi"
@@ -39,11 +39,11 @@ func Init(dc driverapi.DriverCallback, config map[string]interface{}) error {
 d := newDriver(name, client)
 c, err := d.(*driver).getCapabilities()
 if err != nil {
-log.Errorf("error getting capability for %s due to %v", name, err)
+logrus.Errorf("error getting capability for %s due to %v", name, err)
 return
 }
 if err = dc.RegisterDriver(name, d, *c); err != nil {
-log.Errorf("error registering driver for %s due to %v", name, err)
+logrus.Errorf("error registering driver for %s due to %v", name, err)
 }
 })
 return nil

vendor/github.com/docker/libnetwork/drivers/solaris/bridge/bridge.go (10 changes; generated, vendored)
@@ -390,7 +390,7 @@ func bridgeSetup(config *networkConfiguration) error {
 "/usr/bin/grep " + config.DefaultBindingIP.String()
 out, err := exec.Command("/usr/bin/bash", "-c", ipadmCmd).Output()
 if err != nil {
-logrus.Warnf("cannot find binding interface")
+logrus.Warn("cannot find binding interface")
 return err
 }
 bindingIntf = strings.SplitN(string(out), "/", 2)[0]
@@ -456,21 +456,21 @@ func bridgeCleanup(config *networkConfiguration, logErr bool) {

 err = exec.Command("/usr/sbin/pfctl", "-a", pfAnchor, "-F", "all").Run()
 if err != nil && logErr {
-logrus.Warnf("cannot flush firewall rules")
+logrus.Warn("cannot flush firewall rules")
 }
 err = exec.Command("/usr/sbin/ifconfig", gwName, "unplumb").Run()
 if err != nil && logErr {
-logrus.Warnf("cannot remove gateway interface")
+logrus.Warn("cannot remove gateway interface")
 }
 err = exec.Command("/usr/sbin/dladm", "delete-vnic",
 "-t", gwName).Run()
 if err != nil && logErr {
-logrus.Warnf("cannot delete vnic")
+logrus.Warn("cannot delete vnic")
 }
 err = exec.Command("/usr/sbin/dladm", "delete-etherstub",
 "-t", config.BridgeNameInternal).Run()
 if err != nil && logErr {
-logrus.Warnf("cannot delete etherstub")
+logrus.Warn("cannot delete etherstub")
 }
 err = exec.Command("/usr/sbin/pfctl", "-a", tableAnchor, "-t", tableName, "-T", "delete", gwIP).Run()
 if err != nil && logErr {

vendor/github.com/docker/libnetwork/drivers/solaris/bridge/port_mapping.go (2 changes; generated, vendored)
@@ -36,7 +36,7 @@ func addPFRules(epid, bindIntf string, bs []types.PortBinding) {
 f, err := os.OpenFile(fname,
 os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
 if err != nil {
-logrus.Warnf("cannot open temp pf file")
+logrus.Warn("cannot open temp pf file")
 return
 }
 for _, b := range bs {

vendor/github.com/docker/libnetwork/drivers/solaris/overlay/encryption.go (274 additions; generated, vendored, new file)
@@ -0,0 +1,274 @@
+package overlay
+
+import (
+"bytes"
+"encoding/binary"
+"encoding/hex"
+"fmt"
+"hash/fnv"
+"net"
+"sync"
+
+"github.com/Sirupsen/logrus"
+"github.com/docker/libnetwork/types"
+)
+
+const (
+mark = uint32(0xD0C4E3)
+timeout = 30
+pktExpansion = 26 // SPI(4) + SeqN(4) + IV(8) + PadLength(1) + NextHeader(1) + ICV(8)
+)
+
+const (
+forward = iota + 1
+reverse
+bidir
+)
+
+type key struct {
+value []byte
+tag uint32
+}
+
+func (k *key) String() string {
+if k != nil {
+return fmt.Sprintf("(key: %s, tag: 0x%x)", hex.EncodeToString(k.value)[0:5], k.tag)
+}
+return ""
+}
+
+type spi struct {
+forward int
+reverse int
+}
+
+func (s *spi) String() string {
+return fmt.Sprintf("SPI(FWD: 0x%x, REV: 0x%x)", uint32(s.forward), uint32(s.reverse))
+}
+
+type encrMap struct {
+nodes map[string][]*spi
+sync.Mutex
+}
+
+func (e *encrMap) String() string {
+e.Lock()
+defer e.Unlock()
+b := new(bytes.Buffer)
+for k, v := range e.nodes {
+b.WriteString("\n")
+b.WriteString(k)
+b.WriteString(":")
+b.WriteString("[")
+for _, s := range v {
+b.WriteString(s.String())
+b.WriteString(",")
+}
+b.WriteString("]")
+
+}
+return b.String()
+}
+
+func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal, add bool) error {
+logrus.Debugf("checkEncryption(%s, %v, %d, %t)", nid[0:7], rIP, vxlanID, isLocal)
+
+n := d.network(nid)
+if n == nil || !n.secure {
+return nil
+}
+
+if len(d.keys) == 0 {
+return types.ForbiddenErrorf("encryption key is not present")
+}
+
+lIP := net.ParseIP(d.bindAddress)
+aIP := net.ParseIP(d.advertiseAddress)
+nodes := map[string]net.IP{}
+
+switch {
+case isLocal:
+if err := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
+if !aIP.Equal(pEntry.vtep) {
+nodes[pEntry.vtep.String()] = pEntry.vtep
+}
+return false
+}); err != nil {
+logrus.Warnf("Failed to retrieve list of participating nodes in overlay network %s: %v", nid[0:5], err)
+}
+default:
+if len(d.network(nid).endpoints) > 0 {
+nodes[rIP.String()] = rIP
+}
+}
+
+logrus.Debugf("List of nodes: %s", nodes)
+
+if add {
+for _, rIP := range nodes {
+if err := setupEncryption(lIP, aIP, rIP, vxlanID, d.secMap, d.keys); err != nil {
+logrus.Warnf("Failed to program network encryption between %s and %s: %v", lIP, rIP, err)
+}
+}
+} else {
+if len(nodes) == 0 {
+if err := removeEncryption(lIP, rIP, d.secMap); err != nil {
+logrus.Warnf("Failed to remove network encryption between %s and %s: %v", lIP, rIP, err)
+}
+}
+}
+
+return nil
+}
+
+func setupEncryption(localIP, advIP, remoteIP net.IP, vni uint32, em *encrMap, keys []*key) error {
+logrus.Debugf("Programming encryption for vxlan %d between %s and %s", vni, localIP, remoteIP)
+rIPs := remoteIP.String()
+
+indices := make([]*spi, 0, len(keys))
+
+err := programMangle(vni, true)
+if err != nil {
+logrus.Warn(err)
+}
+
+em.Lock()
+em.nodes[rIPs] = indices
+em.Unlock()
+
+return nil
+}
+
+func removeEncryption(localIP, remoteIP net.IP, em *encrMap) error {
+return nil
+}
+
+func programMangle(vni uint32, add bool) (err error) {
+return
+}
+
+func buildSPI(src, dst net.IP, st uint32) int {
+b := make([]byte, 4)
+binary.BigEndian.PutUint32(b, st)
+h := fnv.New32a()
+h.Write(src)
+h.Write(b)
+h.Write(dst)
+return int(binary.BigEndian.Uint32(h.Sum(nil)))
+}
+
+func (d *driver) secMapWalk(f func(string, []*spi) ([]*spi, bool)) error {
+d.secMap.Lock()
+for node, indices := range d.secMap.nodes {
+idxs, stop := f(node, indices)
+if idxs != nil {
+d.secMap.nodes[node] = idxs
+}
+if stop {
+break
+}
+}
+d.secMap.Unlock()
+return nil
+}
+
+func (d *driver) setKeys(keys []*key) error {
+if d.keys != nil {
+return types.ForbiddenErrorf("initial keys are already present")
+}
+d.keys = keys
+logrus.Debugf("Initial encryption keys: %v", d.keys)
+return nil
+}
+
+// updateKeys allows to add a new key and/or change the primary key and/or prune an existing key
+// The primary key is the key used in transmission and will go in first position in the list.
+func (d *driver) updateKeys(newKey, primary, pruneKey *key) error {
+logrus.Debugf("Updating Keys. New: %v, Primary: %v, Pruned: %v", newKey, primary, pruneKey)
+
+logrus.Debugf("Current: %v", d.keys)
+
+var (
+newIdx = -1
+priIdx = -1
+delIdx = -1
+lIP = net.ParseIP(d.bindAddress)
+)
+
+d.Lock()
+// add new
+if newKey != nil {
+d.keys = append(d.keys, newKey)
+newIdx += len(d.keys)
+}
+for i, k := range d.keys {
+if primary != nil && k.tag == primary.tag {
+priIdx = i
+}
+if pruneKey != nil && k.tag == pruneKey.tag {
+delIdx = i
+}
+}
+d.Unlock()
+
+if (newKey != nil && newIdx == -1) ||
+(primary != nil && priIdx == -1) ||
+(pruneKey != nil && delIdx == -1) {
+err := types.BadRequestErrorf("cannot find proper key indices while processing key update:"+
+"(newIdx,priIdx,delIdx):(%d, %d, %d)", newIdx, priIdx, delIdx)
+logrus.Warn(err)
+return err
+}
+
+d.secMapWalk(func(rIPs string, spis []*spi) ([]*spi, bool) {
+rIP := net.ParseIP(rIPs)
+return updateNodeKey(lIP, rIP, spis, d.keys, newIdx, priIdx, delIdx), false
+})
+
+d.Lock()
+// swap primary
+if priIdx != -1 {
+swp := d.keys[0]
+d.keys[0] = d.keys[priIdx]
+d.keys[priIdx] = swp
+}
+// prune
+if delIdx != -1 {
+if delIdx == 0 {
+delIdx = priIdx
+}
+d.keys = append(d.keys[:delIdx], d.keys[delIdx+1:]...)
+}
+d.Unlock()
+
+logrus.Debugf("Updated: %v", d.keys)
+
+return nil
+}
+
+/********************************************************
+ * Steady state: rSA0, rSA1, rSA2, fSA1, fSP1
+ * Rotation --> -rSA0, +rSA3, +fSA2, +fSP2/-fSP1, -fSA1
+ * Steady state: rSA1, rSA2, rSA3, fSA2, fSP2
+ *********************************************************/
+
+// Spis and keys are sorted in such away the one in position 0 is the primary
+func updateNodeKey(lIP, rIP net.IP, idxs []*spi, curKeys []*key, newIdx, priIdx, delIdx int) []*spi {
+logrus.Debugf("Updating keys for node: %s (%d,%d,%d)", rIP, newIdx, priIdx, delIdx)
+return nil
+}
+
+func (n *network) maxMTU() int {
+mtu := 1500
+if n.mtu != 0 {
+mtu = n.mtu
+}
+mtu -= vxlanEncap
+if n.secure {
+// In case of encryption account for the
+// esp packet espansion and padding
+mtu -= pktExpansion
+mtu -= (mtu % 4)
+}
+return mtu
+}
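The maxMTU helper at the end of the new Solaris encryption file subtracts the VXLAN encapsulation overhead and, on secure networks, the 26-byte ESP expansion, then rounds down to a multiple of 4. A quick worked example of that arithmetic (the vxlanEncap constant is defined elsewhere in the driver and is not part of this excerpt; 50 bytes is assumed here purely for illustration):

```go
package main

import "fmt"

// Worked example of the maxMTU computation shown above.
// Assumption: vxlanEncap = 50 bytes (the real constant lives elsewhere in
// the vendored driver and is not visible in this diff).
const (
	assumedVxlanEncap = 50
	pktExpansion      = 26 // SPI(4) + SeqN(4) + IV(8) + PadLength(1) + NextHeader(1) + ICV(8)
)

func maxMTU(baseMTU int, secure bool) int {
	mtu := baseMTU - assumedVxlanEncap
	if secure {
		mtu -= pktExpansion
		mtu -= mtu % 4 // keep the resulting MTU 4-byte aligned
	}
	return mtu
}

func main() {
	fmt.Println(maxMTU(1500, false)) // 1450
	fmt.Println(maxMTU(1500, true))  // 1424
}
```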

vendor/github.com/docker/libnetwork/drivers/solaris/overlay/joinleave.go (184 additions; generated, vendored, new file)
@@ -0,0 +1,184 @@
+package overlay
+
+import (
+"fmt"
+"net"
+
+"github.com/Sirupsen/logrus"
+"github.com/docker/libnetwork/driverapi"
+"github.com/docker/libnetwork/types"
+"github.com/gogo/protobuf/proto"
+)
+
+// Join method is invoked when a Sandbox is attached to an endpoint.
+func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
+if err := validateID(nid, eid); err != nil {
+return err
+}
+
+n := d.network(nid)
+if n == nil {
+return fmt.Errorf("could not find network with id %s", nid)
+}
+
+ep := n.endpoint(eid)
+if ep == nil {
+return fmt.Errorf("could not find endpoint with id %s", eid)
+}
+
+if n.secure && len(d.keys) == 0 {
+return fmt.Errorf("cannot join secure network: encryption keys not present")
+}
+
+s := n.getSubnetforIP(ep.addr)
+if s == nil {
+return fmt.Errorf("could not find subnet for endpoint %s", eid)
+}
+
+if err := n.obtainVxlanID(s); err != nil {
+return fmt.Errorf("couldn't get vxlan id for %q: %v", s.subnetIP.String(), err)
+}
+
+if err := n.joinSandbox(false); err != nil {
+return fmt.Errorf("network sandbox join failed: %v", err)
+}
+
+if err := n.joinSubnetSandbox(s, false); err != nil {
+return fmt.Errorf("subnet sandbox join failed for %q: %v", s.subnetIP.String(), err)
+}
+
+// joinSubnetSandbox gets called when an endpoint comes up on a new subnet in the
+// overlay network. Hence the Endpoint count should be updated outside joinSubnetSandbox
+n.incEndpointCount()
+
+// Add creating a veth Pair for Solaris
+
+containerIfName := "solaris-if"
+ep.ifName = containerIfName
+
+if err := d.writeEndpointToStore(ep); err != nil {
+return fmt.Errorf("failed to update overlay endpoint %s to local data store: %v", ep.id[0:7], err)
+}
+
+// Add solaris plumbing to add veth (with ep mac addr) to sandbox
+
+for _, sub := range n.subnets {
+if sub == s {
+continue
+}
+if err := jinfo.AddStaticRoute(sub.subnetIP, types.NEXTHOP, s.gwIP.IP); err != nil {
+logrus.Errorf("Adding subnet %s static route in network %q failed\n", s.subnetIP, n.id)
+}
+}
+
+if iNames := jinfo.InterfaceName(); iNames != nil {
+err := iNames.SetNames(containerIfName, "eth")
+if err != nil {
+return err
+}
+}
+
+d.peerDbAdd(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac,
+net.ParseIP(d.advertiseAddress), true)
+
+if err := d.checkEncryption(nid, nil, n.vxlanID(s), true, true); err != nil {
+logrus.Warn(err)
+}
+
+buf, err := proto.Marshal(&PeerRecord{
+EndpointIP: ep.addr.String(),
+EndpointMAC: ep.mac.String(),
+TunnelEndpointIP: d.advertiseAddress,
+})
+if err != nil {
+return err
+}
+
+if err := jinfo.AddTableEntry(ovPeerTable, eid, buf); err != nil {
+logrus.Errorf("overlay: Failed adding table entry to joininfo: %v", err)
+}
+
+d.pushLocalEndpointEvent("join", nid, eid)
+
+return nil
+}
+
+func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) {
+if tableName != ovPeerTable {
+logrus.Errorf("Unexpected table notification for table %s received", tableName)
+return
+}
+
+eid := key
+
+var peer PeerRecord
+if err := proto.Unmarshal(value, &peer); err != nil {
+logrus.Errorf("Failed to unmarshal peer record: %v", err)
+return
+}
+
+// Ignore local peers. We already know about them and they
+// should not be added to vxlan fdb.
+if peer.TunnelEndpointIP == d.advertiseAddress {
+return
+}
+
+addr, err := types.ParseCIDR(peer.EndpointIP)
+if err != nil {
+logrus.Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP)
+return
+}
+
+mac, err := net.ParseMAC(peer.EndpointMAC)
+if err != nil {
+logrus.Errorf("Invalid mac %s received in event notify", peer.EndpointMAC)
+return
+}
+
+vtep := net.ParseIP(peer.TunnelEndpointIP)
+if vtep == nil {
+logrus.Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP)
+return
+}
+
+if etype == driverapi.Delete {
+d.peerDelete(nid, eid, addr.IP, addr.Mask, mac, vtep, true)
+return
+}
+
+d.peerAdd(nid, eid, addr.IP, addr.Mask, mac, vtep, true)
+}
+
+// Leave method is invoked when a Sandbox detaches from an endpoint.
+func (d *driver) Leave(nid, eid string) error {
+if err := validateID(nid, eid); err != nil {
+return err
+}
+
+n := d.network(nid)
+if n == nil {
+return fmt.Errorf("could not find network with id %s", nid)
+}
+
+ep := n.endpoint(eid)
+
+if ep == nil {
+return types.InternalMaskableErrorf("could not find endpoint with id %s", eid)
+}
+
+if d.notifyCh != nil {
+d.notifyCh <- ovNotify{
+action: "leave",
+nw: n,
+ep: ep,
+}
+}
+
+n.leaveSandbox()
+
+if err := d.checkEncryption(nid, nil, 0, true, false); err != nil {
+logrus.Warn(err)
+}
+
+return nil
+}
249
vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_endpoint.go
generated
vendored
Normal file
@ -0,0 +1,249 @@
package overlay

import (
"encoding/json"
"fmt"
"net"

"github.com/Sirupsen/logrus"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/netutils"
"github.com/docker/libnetwork/types"
)

type endpointTable map[string]*endpoint

const overlayEndpointPrefix = "overlay/endpoint"

type endpoint struct {
id string
nid string
ifName string
mac net.HardwareAddr
addr *net.IPNet
dbExists bool
dbIndex uint64
}

func (n *network) endpoint(eid string) *endpoint {
n.Lock()
defer n.Unlock()

return n.endpoints[eid]
}

func (n *network) addEndpoint(ep *endpoint) {
n.Lock()
n.endpoints[ep.id] = ep
n.Unlock()
}

func (n *network) deleteEndpoint(eid string) {
n.Lock()
delete(n.endpoints, eid)
n.Unlock()
}

func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
epOptions map[string]interface{}) error {
var err error

if err = validateID(nid, eid); err != nil {
return err
}

// Since we perform lazy configuration make sure we try
// configuring the driver when we enter CreateEndpoint since
// CreateNetwork may not be called in every node.
if err := d.configure(); err != nil {
return err
}

n := d.network(nid)
if n == nil {
return fmt.Errorf("network id %q not found", nid)
}

ep := &endpoint{
id: eid,
nid: n.id,
addr: ifInfo.Address(),
mac: ifInfo.MacAddress(),
}
if ep.addr == nil {
return fmt.Errorf("create endpoint was not passed interface IP address")
}

if s := n.getSubnetforIP(ep.addr); s == nil {
return fmt.Errorf("no matching subnet for IP %q in network %q\n", ep.addr, nid)
}

if ep.mac == nil {
ep.mac = netutils.GenerateMACFromIP(ep.addr.IP)
if err := ifInfo.SetMacAddress(ep.mac); err != nil {
return err
}
}

n.addEndpoint(ep)

if err := d.writeEndpointToStore(ep); err != nil {
return fmt.Errorf("failed to update overlay endpoint %s to local store: %v", ep.id[0:7], err)
}

return nil
}

func (d *driver) DeleteEndpoint(nid, eid string) error {
if err := validateID(nid, eid); err != nil {
return err
}

n := d.network(nid)
if n == nil {
return fmt.Errorf("network id %q not found", nid)
}

ep := n.endpoint(eid)
if ep == nil {
return fmt.Errorf("endpoint id %q not found", eid)
}

n.deleteEndpoint(eid)

if err := d.deleteEndpointFromStore(ep); err != nil {
logrus.Warnf("Failed to delete overlay endpoint %s from local store: %v", ep.id[0:7], err)
}

if ep.ifName == "" {
return nil
}

// OVERLAY_SOLARIS: Add Solaris unplumbing for removing the interface endpoint

return nil
}

func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) {
return make(map[string]interface{}, 0), nil
}

func (d *driver) deleteEndpointFromStore(e *endpoint) error {
if d.localStore == nil {
return fmt.Errorf("overlay local store not initialized, ep not deleted")
}

if err := d.localStore.DeleteObjectAtomic(e); err != nil {
return err
}

return nil
}

func (d *driver) writeEndpointToStore(e *endpoint) error {
if d.localStore == nil {
return fmt.Errorf("overlay local store not initialized, ep not added")
}

if err := d.localStore.PutObjectAtomic(e); err != nil {
return err
}
return nil
}

func (ep *endpoint) DataScope() string {
return datastore.LocalScope
}

func (ep *endpoint) New() datastore.KVObject {
return &endpoint{}
}

func (ep *endpoint) CopyTo(o datastore.KVObject) error {
dstep := o.(*endpoint)
*dstep = *ep
return nil
}

func (ep *endpoint) Key() []string {
return []string{overlayEndpointPrefix, ep.id}
}

func (ep *endpoint) KeyPrefix() []string {
return []string{overlayEndpointPrefix}
}

func (ep *endpoint) Index() uint64 {
return ep.dbIndex
}

func (ep *endpoint) SetIndex(index uint64) {
ep.dbIndex = index
ep.dbExists = true
}

func (ep *endpoint) Exists() bool {
return ep.dbExists
}

func (ep *endpoint) Skip() bool {
return false
}

func (ep *endpoint) Value() []byte {
b, err := json.Marshal(ep)
if err != nil {
return nil
}
return b
}

func (ep *endpoint) SetValue(value []byte) error {
return json.Unmarshal(value, ep)
}

func (ep *endpoint) MarshalJSON() ([]byte, error) {
epMap := make(map[string]interface{})

epMap["id"] = ep.id
epMap["nid"] = ep.nid
if ep.ifName != "" {
epMap["ifName"] = ep.ifName
}
if ep.addr != nil {
epMap["addr"] = ep.addr.String()
}
if len(ep.mac) != 0 {
epMap["mac"] = ep.mac.String()
}

return json.Marshal(epMap)
}

func (ep *endpoint) UnmarshalJSON(value []byte) error {
var (
err error
epMap map[string]interface{}
)

json.Unmarshal(value, &epMap)

ep.id = epMap["id"].(string)
ep.nid = epMap["nid"].(string)
if v, ok := epMap["mac"]; ok {
if ep.mac, err = net.ParseMAC(v.(string)); err != nil {
return types.InternalErrorf("failed to decode endpoint interface mac address after json unmarshal: %s", v.(string))
}
}
if v, ok := epMap["addr"]; ok {
if ep.addr, err = types.ParseCIDR(v.(string)); err != nil {
return types.InternalErrorf("failed to decode endpoint interface ipv4 address after json unmarshal: %v", err)
}
}
if v, ok := epMap["ifName"]; ok {
ep.ifName = v.(string)
}

return nil
}
791
vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_network.go
generated
vendored
Normal file
@ -0,0 +1,791 @@
|
|||
package overlay
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/datastore"
|
||||
"github.com/docker/libnetwork/driverapi"
|
||||
"github.com/docker/libnetwork/netlabel"
|
||||
"github.com/docker/libnetwork/netutils"
|
||||
"github.com/docker/libnetwork/osl"
|
||||
"github.com/docker/libnetwork/resolvconf"
|
||||
"github.com/docker/libnetwork/types"
|
||||
)
|
||||
|
||||
var (
|
||||
hostMode bool
|
||||
networkOnce sync.Once
|
||||
networkMu sync.Mutex
|
||||
vniTbl = make(map[uint32]string)
|
||||
)
|
||||
|
||||
type networkTable map[string]*network
|
||||
|
||||
type subnet struct {
|
||||
once *sync.Once
|
||||
vxlanName string
|
||||
brName string
|
||||
vni uint32
|
||||
initErr error
|
||||
subnetIP *net.IPNet
|
||||
gwIP *net.IPNet
|
||||
}
|
||||
|
||||
type subnetJSON struct {
|
||||
SubnetIP string
|
||||
GwIP string
|
||||
Vni uint32
|
||||
}
|
||||
|
||||
type network struct {
|
||||
id string
|
||||
dbIndex uint64
|
||||
dbExists bool
|
||||
sbox osl.Sandbox
|
||||
endpoints endpointTable
|
||||
driver *driver
|
||||
joinCnt int
|
||||
once *sync.Once
|
||||
initEpoch int
|
||||
initErr error
|
||||
subnets []*subnet
|
||||
secure bool
|
||||
mtu int
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) {
|
||||
return nil, types.NotImplementedErrorf("not implemented")
|
||||
}
|
||||
|
||||
func (d *driver) NetworkFree(id string) error {
|
||||
return types.NotImplementedErrorf("not implemented")
|
||||
}
|
||||
|
||||
func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error {
|
||||
if id == "" {
|
||||
return fmt.Errorf("invalid network id")
|
||||
}
|
||||
if len(ipV4Data) == 0 || ipV4Data[0].Pool.String() == "0.0.0.0/0" {
|
||||
return types.BadRequestErrorf("ipv4 pool is empty")
|
||||
}
|
||||
|
||||
// Since we perform lazy configuration make sure we try
|
||||
// configuring the driver when we enter CreateNetwork
|
||||
if err := d.configure(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n := &network{
|
||||
id: id,
|
||||
driver: d,
|
||||
endpoints: endpointTable{},
|
||||
once: &sync.Once{},
|
||||
subnets: []*subnet{},
|
||||
}
|
||||
|
||||
vnis := make([]uint32, 0, len(ipV4Data))
|
||||
if gval, ok := option[netlabel.GenericData]; ok {
|
||||
optMap := gval.(map[string]string)
|
||||
if val, ok := optMap[netlabel.OverlayVxlanIDList]; ok {
|
||||
logrus.Debugf("overlay: Received vxlan IDs: %s", val)
|
||||
vniStrings := strings.Split(val, ",")
|
||||
for _, vniStr := range vniStrings {
|
||||
vni, err := strconv.Atoi(vniStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid vxlan id value %q passed", vniStr)
|
||||
}
|
||||
|
||||
vnis = append(vnis, uint32(vni))
|
||||
}
|
||||
}
|
||||
if _, ok := optMap[secureOption]; ok {
|
||||
n.secure = true
|
||||
}
|
||||
if val, ok := optMap[netlabel.DriverMTU]; ok {
|
||||
var err error
|
||||
if n.mtu, err = strconv.Atoi(val); err != nil {
|
||||
return fmt.Errorf("failed to parse %v: %v", val, err)
|
||||
}
|
||||
if n.mtu < 0 {
|
||||
return fmt.Errorf("invalid MTU value: %v", n.mtu)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we are getting vnis from libnetwork, either we get for
|
||||
// all subnets or none.
|
||||
if len(vnis) != 0 && len(vnis) < len(ipV4Data) {
|
||||
return fmt.Errorf("insufficient vnis(%d) passed to overlay", len(vnis))
|
||||
}
|
||||
|
||||
for i, ipd := range ipV4Data {
|
||||
s := &subnet{
|
||||
subnetIP: ipd.Pool,
|
||||
gwIP: ipd.Gateway,
|
||||
once: &sync.Once{},
|
||||
}
|
||||
|
||||
if len(vnis) != 0 {
|
||||
s.vni = vnis[i]
|
||||
}
|
||||
|
||||
n.subnets = append(n.subnets, s)
|
||||
}
|
||||
|
||||
if err := n.writeToStore(); err != nil {
|
||||
return fmt.Errorf("failed to update data store for network %v: %v", n.id, err)
|
||||
}
|
||||
|
||||
// Make sure no rule is on the way from any stale secure network
|
||||
if !n.secure {
|
||||
for _, vni := range vnis {
|
||||
programMangle(vni, false)
|
||||
}
|
||||
}
|
||||
|
||||
if nInfo != nil {
|
||||
if err := nInfo.TableEventRegister(ovPeerTable); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
d.addNetwork(n)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) DeleteNetwork(nid string) error {
|
||||
if nid == "" {
|
||||
return fmt.Errorf("invalid network id")
|
||||
}
|
||||
|
||||
// Make sure driver resources are initialized before proceeding
|
||||
if err := d.configure(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n := d.network(nid)
|
||||
if n == nil {
|
||||
return fmt.Errorf("could not find network with id %s", nid)
|
||||
}
|
||||
|
||||
d.deleteNetwork(nid)
|
||||
|
||||
vnis, err := n.releaseVxlanID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if n.secure {
|
||||
for _, vni := range vnis {
|
||||
programMangle(vni, false)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) RevokeExternalConnectivity(nid, eid string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *network) incEndpointCount() {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
n.joinCnt++
|
||||
}
|
||||
|
||||
func (n *network) joinSandbox(restore bool) error {
|
||||
// If there is a race between two go routines here only one will win
|
||||
// the other will wait.
|
||||
n.once.Do(func() {
|
||||
// save the error status of initSandbox in n.initErr so that
|
||||
// all the racing go routines are able to know the status.
|
||||
n.initErr = n.initSandbox(restore)
|
||||
})
|
||||
|
||||
return n.initErr
|
||||
}
|
||||
|
||||
func (n *network) joinSubnetSandbox(s *subnet, restore bool) error {
|
||||
s.once.Do(func() {
|
||||
s.initErr = n.initSubnetSandbox(s, restore)
|
||||
})
|
||||
return s.initErr
|
||||
}
|
||||
|
||||
func (n *network) leaveSandbox() {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
n.joinCnt--
|
||||
if n.joinCnt != 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// We are about to destroy sandbox since the container is leaving the network
|
||||
// Reinitialize the once variable so that we will be able to trigger one time
|
||||
// sandbox initialization(again) when another container joins subsequently.
|
||||
n.once = &sync.Once{}
|
||||
for _, s := range n.subnets {
|
||||
s.once = &sync.Once{}
|
||||
}
|
||||
|
||||
n.destroySandbox()
|
||||
}
|
||||
|
||||
// to be called while holding network lock
|
||||
func (n *network) destroySandbox() {
|
||||
if n.sbox != nil {
|
||||
for _, iface := range n.sbox.Info().Interfaces() {
|
||||
if err := iface.Remove(); err != nil {
|
||||
logrus.Debugf("Remove interface %s failed: %v", iface.SrcName(), err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, s := range n.subnets {
|
||||
if s.vxlanName != "" {
|
||||
err := deleteInterface(s.vxlanName)
|
||||
if err != nil {
|
||||
logrus.Warnf("could not cleanup sandbox properly: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
n.sbox.Destroy()
|
||||
n.sbox = nil
|
||||
}
|
||||
}
|
||||
|
||||
func networkOnceInit() {
|
||||
if os.Getenv("_OVERLAY_HOST_MODE") != "" {
|
||||
hostMode = true
|
||||
return
|
||||
}
|
||||
|
||||
err := createVxlan("testvxlan1", 1, 0)
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to create testvxlan1 interface: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
defer deleteInterface("testvxlan1")
|
||||
}
|
||||
|
||||
func (n *network) generateVxlanName(s *subnet) string {
|
||||
id := n.id
|
||||
if len(n.id) > 12 {
|
||||
id = n.id[:12]
|
||||
}
|
||||
|
||||
return "vx_" + id + "_0"
|
||||
}
|
||||
|
||||
func (n *network) generateBridgeName(s *subnet) string {
|
||||
id := n.id
|
||||
if len(n.id) > 5 {
|
||||
id = n.id[:5]
|
||||
}
|
||||
|
||||
return n.getBridgeNamePrefix(s) + "_" + id + "_0"
|
||||
}
|
||||
|
||||
func (n *network) getBridgeNamePrefix(s *subnet) string {
|
||||
return "ov_" + fmt.Sprintf("%06x", n.vxlanID(s))
|
||||
}
|
||||
|
||||
func isOverlap(nw *net.IPNet) bool {
|
||||
var nameservers []string
|
||||
|
||||
if rc, err := resolvconf.Get(); err == nil {
|
||||
nameservers = resolvconf.GetNameserversAsCIDR(rc.Content)
|
||||
}
|
||||
|
||||
if err := netutils.CheckNameserverOverlaps(nameservers, nw); err != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if err := netutils.CheckRouteOverlaps(nw); err != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (n *network) restoreSubnetSandbox(s *subnet, brName, vxlanName string) error {
|
||||
sbox := n.sandbox()
|
||||
|
||||
// restore overlay osl sandbox
|
||||
Ifaces := make(map[string][]osl.IfaceOption)
|
||||
brIfaceOption := make([]osl.IfaceOption, 2)
|
||||
brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Address(s.gwIP))
|
||||
brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Bridge(true))
|
||||
Ifaces[fmt.Sprintf("%s+%s", brName, "br")] = brIfaceOption
|
||||
|
||||
err := sbox.Restore(Ifaces, nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
Ifaces = make(map[string][]osl.IfaceOption)
|
||||
vxlanIfaceOption := make([]osl.IfaceOption, 1)
|
||||
vxlanIfaceOption = append(vxlanIfaceOption, sbox.InterfaceOptions().Master(brName))
|
||||
Ifaces[fmt.Sprintf("%s+%s", vxlanName, "vxlan")] = vxlanIfaceOption
|
||||
err = sbox.Restore(Ifaces, nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *network) addInterface(srcName, dstPrefix, name string, isBridge bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *network) setupSubnetSandbox(s *subnet, brName, vxlanName string) error {
|
||||
|
||||
if hostMode {
|
||||
// Try to delete stale bridge interface if it exists
|
||||
if err := deleteInterface(brName); err != nil {
|
||||
deleteInterfaceBySubnet(n.getBridgeNamePrefix(s), s)
|
||||
}
|
||||
|
||||
if isOverlap(s.subnetIP) {
|
||||
return fmt.Errorf("overlay subnet %s has conflicts in the host while running in host mode", s.subnetIP.String())
|
||||
}
|
||||
}
|
||||
|
||||
if !hostMode {
|
||||
// Check if this subnet's vni is being used in some
// other namespace by looking at vniTbl that we just
// populated in the once init. If a hit is found then
// it must be a stale namespace from a previous
// life. Destroy it completely and reclaim the resources.
|
||||
networkMu.Lock()
|
||||
path, ok := vniTbl[n.vxlanID(s)]
|
||||
networkMu.Unlock()
|
||||
|
||||
if ok {
|
||||
os.Remove(path)
|
||||
|
||||
networkMu.Lock()
|
||||
delete(vniTbl, n.vxlanID(s))
|
||||
networkMu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
err := createVxlan(vxlanName, n.vxlanID(s), n.maxMTU())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *network) initSubnetSandbox(s *subnet, restore bool) error {
|
||||
brName := n.generateBridgeName(s)
|
||||
vxlanName := n.generateVxlanName(s)
|
||||
|
||||
if restore {
|
||||
n.restoreSubnetSandbox(s, brName, vxlanName)
|
||||
} else {
|
||||
n.setupSubnetSandbox(s, brName, vxlanName)
|
||||
}
|
||||
|
||||
n.Lock()
|
||||
s.vxlanName = vxlanName
|
||||
s.brName = brName
|
||||
n.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *network) cleanupStaleSandboxes() {
|
||||
filepath.Walk(filepath.Dir(osl.GenerateKey("walk")),
|
||||
func(path string, info os.FileInfo, err error) error {
|
||||
_, fname := filepath.Split(path)
|
||||
|
||||
pList := strings.Split(fname, "-")
|
||||
if len(pList) <= 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
pattern := pList[1]
|
||||
if strings.Contains(n.id, pattern) {
|
||||
// Now that we have destroyed this
|
||||
// sandbox, remove all references to
|
||||
// it in vniTbl so that we don't
|
||||
// inadvertently destroy the sandbox
|
||||
// created in this life.
|
||||
networkMu.Lock()
|
||||
for vni, tblPath := range vniTbl {
|
||||
if tblPath == path {
|
||||
delete(vniTbl, vni)
|
||||
}
|
||||
}
|
||||
networkMu.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (n *network) initSandbox(restore bool) error {
|
||||
n.Lock()
|
||||
n.initEpoch++
|
||||
n.Unlock()
|
||||
|
||||
networkOnce.Do(networkOnceInit)
|
||||
|
||||
if !restore {
|
||||
// If there are any stale sandboxes related to this network
|
||||
// from previous daemon life clean it up here
|
||||
n.cleanupStaleSandboxes()
|
||||
}
|
||||
|
||||
// In the restore case the network sandbox already exists; but we don't know
|
||||
// what epoch number it was created with. It has to be retrieved by
|
||||
// searching the net namespaces.
|
||||
key := ""
|
||||
if restore {
|
||||
key = osl.GenerateKey("-" + n.id)
|
||||
} else {
|
||||
key = osl.GenerateKey(fmt.Sprintf("%d-", n.initEpoch) + n.id)
|
||||
}
|
||||
|
||||
sbox, err := osl.NewSandbox(key, !hostMode, restore)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not get network sandbox (oper %t): %v", restore, err)
|
||||
}
|
||||
|
||||
n.setSandbox(sbox)
|
||||
|
||||
if !restore {
|
||||
n.driver.peerDbUpdateSandbox(n.id)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) addNetwork(n *network) {
|
||||
d.Lock()
|
||||
d.networks[n.id] = n
|
||||
d.Unlock()
|
||||
}
|
||||
|
||||
func (d *driver) deleteNetwork(nid string) {
|
||||
d.Lock()
|
||||
delete(d.networks, nid)
|
||||
d.Unlock()
|
||||
}
|
||||
|
||||
func (d *driver) network(nid string) *network {
|
||||
d.Lock()
|
||||
networks := d.networks
|
||||
d.Unlock()
|
||||
|
||||
n, ok := networks[nid]
|
||||
if !ok {
|
||||
n = d.getNetworkFromStore(nid)
|
||||
if n != nil {
|
||||
n.driver = d
|
||||
n.endpoints = endpointTable{}
|
||||
n.once = &sync.Once{}
|
||||
networks[nid] = n
|
||||
}
|
||||
}
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func (d *driver) getNetworkFromStore(nid string) *network {
|
||||
if d.store == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
n := &network{id: nid}
|
||||
if err := d.store.GetObject(datastore.Key(n.Key()...), n); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func (n *network) sandbox() osl.Sandbox {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
|
||||
return n.sbox
|
||||
}
|
||||
|
||||
func (n *network) setSandbox(sbox osl.Sandbox) {
|
||||
n.Lock()
|
||||
n.sbox = sbox
|
||||
n.Unlock()
|
||||
}
|
||||
|
||||
func (n *network) vxlanID(s *subnet) uint32 {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
|
||||
return s.vni
|
||||
}
|
||||
|
||||
func (n *network) setVxlanID(s *subnet, vni uint32) {
|
||||
n.Lock()
|
||||
s.vni = vni
|
||||
n.Unlock()
|
||||
}
|
||||
|
||||
func (n *network) Key() []string {
|
||||
return []string{"overlay", "network", n.id}
|
||||
}
|
||||
|
||||
func (n *network) KeyPrefix() []string {
|
||||
return []string{"overlay", "network"}
|
||||
}
|
||||
|
||||
func (n *network) Value() []byte {
|
||||
m := map[string]interface{}{}
|
||||
|
||||
netJSON := []*subnetJSON{}
|
||||
|
||||
for _, s := range n.subnets {
|
||||
sj := &subnetJSON{
|
||||
SubnetIP: s.subnetIP.String(),
|
||||
GwIP: s.gwIP.String(),
|
||||
Vni: s.vni,
|
||||
}
|
||||
netJSON = append(netJSON, sj)
|
||||
}
|
||||
|
||||
b, err := json.Marshal(netJSON)
|
||||
if err != nil {
|
||||
return []byte{}
|
||||
}
|
||||
|
||||
m["secure"] = n.secure
|
||||
m["subnets"] = netJSON
|
||||
m["mtu"] = n.mtu
|
||||
b, err = json.Marshal(m)
|
||||
if err != nil {
|
||||
return []byte{}
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func (n *network) Index() uint64 {
|
||||
return n.dbIndex
|
||||
}
|
||||
|
||||
func (n *network) SetIndex(index uint64) {
|
||||
n.dbIndex = index
|
||||
n.dbExists = true
|
||||
}
|
||||
|
||||
func (n *network) Exists() bool {
|
||||
return n.dbExists
|
||||
}
|
||||
|
||||
func (n *network) Skip() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (n *network) SetValue(value []byte) error {
|
||||
var (
|
||||
m map[string]interface{}
|
||||
newNet bool
|
||||
isMap = true
|
||||
netJSON = []*subnetJSON{}
|
||||
)
|
||||
|
||||
if err := json.Unmarshal(value, &m); err != nil {
|
||||
err := json.Unmarshal(value, &netJSON)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
isMap = false
|
||||
}
|
||||
|
||||
if len(n.subnets) == 0 {
|
||||
newNet = true
|
||||
}
|
||||
|
||||
if isMap {
|
||||
if val, ok := m["secure"]; ok {
|
||||
n.secure = val.(bool)
|
||||
}
|
||||
if val, ok := m["mtu"]; ok {
|
||||
n.mtu = int(val.(float64))
|
||||
}
|
||||
bytes, err := json.Marshal(m["subnets"])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := json.Unmarshal(bytes, &netJSON); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, sj := range netJSON {
|
||||
subnetIPstr := sj.SubnetIP
|
||||
gwIPstr := sj.GwIP
|
||||
vni := sj.Vni
|
||||
|
||||
subnetIP, _ := types.ParseCIDR(subnetIPstr)
|
||||
gwIP, _ := types.ParseCIDR(gwIPstr)
|
||||
|
||||
if newNet {
|
||||
s := &subnet{
|
||||
subnetIP: subnetIP,
|
||||
gwIP: gwIP,
|
||||
vni: vni,
|
||||
once: &sync.Once{},
|
||||
}
|
||||
n.subnets = append(n.subnets, s)
|
||||
} else {
|
||||
sNet := n.getMatchingSubnet(subnetIP)
|
||||
if sNet != nil {
|
||||
sNet.vni = vni
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *network) DataScope() string {
|
||||
return datastore.GlobalScope
|
||||
}
|
||||
|
||||
func (n *network) writeToStore() error {
|
||||
if n.driver.store == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return n.driver.store.PutObjectAtomic(n)
|
||||
}
|
||||
|
||||
func (n *network) releaseVxlanID() ([]uint32, error) {
|
||||
if len(n.subnets) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n.driver.store != nil {
|
||||
if err := n.driver.store.DeleteObjectAtomic(n); err != nil {
|
||||
if err == datastore.ErrKeyModified || err == datastore.ErrKeyNotFound {
|
||||
// In both the above cases we can safely assume that the key has been removed by some other
|
||||
// instance and so simply get out of here
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to delete network to vxlan id map: %v", err)
|
||||
}
|
||||
}
|
||||
var vnis []uint32
|
||||
for _, s := range n.subnets {
|
||||
if n.driver.vxlanIdm != nil {
|
||||
vni := n.vxlanID(s)
|
||||
vnis = append(vnis, vni)
|
||||
n.driver.vxlanIdm.Release(uint64(vni))
|
||||
}
|
||||
|
||||
n.setVxlanID(s, 0)
|
||||
}
|
||||
|
||||
return vnis, nil
|
||||
}
|
||||
|
||||
func (n *network) obtainVxlanID(s *subnet) error {
|
||||
//return if the subnet already has a vxlan id assigned
|
||||
if s.vni != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if n.driver.store == nil {
|
||||
return fmt.Errorf("no valid vxlan id and no datastore configured, cannot obtain vxlan id")
|
||||
}
|
||||
|
||||
for {
|
||||
if err := n.driver.store.GetObject(datastore.Key(n.Key()...), n); err != nil {
|
||||
return fmt.Errorf("getting network %q from datastore failed %v", n.id, err)
|
||||
}
|
||||
|
||||
if s.vni == 0 {
|
||||
vxlanID, err := n.driver.vxlanIdm.GetID()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to allocate vxlan id: %v", err)
|
||||
}
|
||||
|
||||
n.setVxlanID(s, uint32(vxlanID))
|
||||
if err := n.writeToStore(); err != nil {
|
||||
n.driver.vxlanIdm.Release(uint64(n.vxlanID(s)))
|
||||
n.setVxlanID(s, 0)
|
||||
if err == datastore.ErrKeyModified {
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("network %q failed to update data store: %v", n.id, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// contains returns true if the passed ip belongs to one of the network's
|
||||
// subnets
|
||||
func (n *network) contains(ip net.IP) bool {
|
||||
for _, s := range n.subnets {
|
||||
if s.subnetIP.Contains(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// getSubnetforIP returns the subnet to which the given IP belongs
|
||||
func (n *network) getSubnetforIP(ip *net.IPNet) *subnet {
|
||||
for _, s := range n.subnets {
|
||||
// first check if the mask lengths are the same
|
||||
i, _ := s.subnetIP.Mask.Size()
|
||||
j, _ := ip.Mask.Size()
|
||||
if i != j {
|
||||
continue
|
||||
}
|
||||
if s.subnetIP.Contains(ip.IP) {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getMatchingSubnet returns the network's subnet that matches the input
|
||||
func (n *network) getMatchingSubnet(ip *net.IPNet) *subnet {
|
||||
if ip == nil {
|
||||
return nil
|
||||
}
|
||||
for _, s := range n.subnets {
|
||||
// first check if the mask lengths are the same
|
||||
i, _ := s.subnetIP.Mask.Size()
|
||||
j, _ := ip.Mask.Size()
|
||||
if i != j {
|
||||
continue
|
||||
}
|
||||
if s.subnetIP.IP.Equal(ip.IP) {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
233
vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_serf.go
generated
vendored
Normal file
@ -0,0 +1,233 @@
|
|||
package overlay
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
)
|
||||
|
||||
type ovNotify struct {
|
||||
action string
|
||||
ep *endpoint
|
||||
nw *network
|
||||
}
|
||||
|
||||
type logWriter struct{}
|
||||
|
||||
func (l *logWriter) Write(p []byte) (int, error) {
|
||||
str := string(p)
|
||||
|
||||
switch {
|
||||
case strings.Contains(str, "[WARN]"):
|
||||
logrus.Warn(str)
|
||||
case strings.Contains(str, "[DEBUG]"):
|
||||
logrus.Debug(str)
|
||||
case strings.Contains(str, "[INFO]"):
|
||||
logrus.Info(str)
|
||||
case strings.Contains(str, "[ERR]"):
|
||||
logrus.Error(str)
|
||||
}
|
||||
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (d *driver) serfInit() error {
|
||||
var err error
|
||||
|
||||
config := serf.DefaultConfig()
|
||||
config.Init()
|
||||
config.MemberlistConfig.BindAddr = d.advertiseAddress
|
||||
|
||||
d.eventCh = make(chan serf.Event, 4)
|
||||
config.EventCh = d.eventCh
|
||||
config.UserCoalescePeriod = 1 * time.Second
|
||||
config.UserQuiescentPeriod = 50 * time.Millisecond
|
||||
|
||||
config.LogOutput = &logWriter{}
|
||||
config.MemberlistConfig.LogOutput = config.LogOutput
|
||||
|
||||
s, err := serf.Create(config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create cluster node: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
s.Shutdown()
|
||||
}
|
||||
}()
|
||||
|
||||
d.serfInstance = s
|
||||
|
||||
d.notifyCh = make(chan ovNotify)
|
||||
d.exitCh = make(chan chan struct{})
|
||||
|
||||
go d.startSerfLoop(d.eventCh, d.notifyCh, d.exitCh)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) serfJoin(neighIP string) error {
|
||||
if neighIP == "" {
|
||||
return fmt.Errorf("no neighbor to join")
|
||||
}
|
||||
if _, err := d.serfInstance.Join([]string{neighIP}, false); err != nil {
|
||||
return fmt.Errorf("Failed to join the cluster at neigh IP %s: %v",
|
||||
neighIP, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) notifyEvent(event ovNotify) {
|
||||
ep := event.ep
|
||||
|
||||
ePayload := fmt.Sprintf("%s %s %s %s", event.action, ep.addr.IP.String(),
|
||||
net.IP(ep.addr.Mask).String(), ep.mac.String())
|
||||
eName := fmt.Sprintf("jl %s %s %s", d.serfInstance.LocalMember().Addr.String(),
|
||||
event.nw.id, ep.id)
|
||||
|
||||
if err := d.serfInstance.UserEvent(eName, []byte(ePayload), true); err != nil {
|
||||
logrus.Errorf("Sending user event failed: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *driver) processEvent(u serf.UserEvent) {
|
||||
logrus.Debugf("Received user event name:%s, payload:%s\n", u.Name,
|
||||
string(u.Payload))
|
||||
|
||||
var dummy, action, vtepStr, nid, eid, ipStr, maskStr, macStr string
|
||||
if _, err := fmt.Sscan(u.Name, &dummy, &vtepStr, &nid, &eid); err != nil {
|
||||
fmt.Printf("Failed to scan name string: %v\n", err)
|
||||
}
|
||||
|
||||
if _, err := fmt.Sscan(string(u.Payload), &action,
|
||||
&ipStr, &maskStr, &macStr); err != nil {
|
||||
fmt.Printf("Failed to scan value string: %v\n", err)
|
||||
}
|
||||
|
||||
logrus.Debugf("Parsed data = %s/%s/%s/%s/%s/%s\n", nid, eid, vtepStr, ipStr, maskStr, macStr)
|
||||
|
||||
mac, err := net.ParseMAC(macStr)
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to parse mac: %v\n", err)
|
||||
}
|
||||
|
||||
if d.serfInstance.LocalMember().Addr.String() == vtepStr {
|
||||
return
|
||||
}
|
||||
|
||||
switch action {
|
||||
case "join":
|
||||
if err := d.peerAdd(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac,
|
||||
net.ParseIP(vtepStr), true); err != nil {
|
||||
logrus.Errorf("Peer add failed in the driver: %v\n", err)
|
||||
}
|
||||
case "leave":
|
||||
if err := d.peerDelete(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac,
|
||||
net.ParseIP(vtepStr), true); err != nil {
|
||||
logrus.Errorf("Peer delete failed in the driver: %v\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *driver) processQuery(q *serf.Query) {
|
||||
logrus.Debugf("Received query name:%s, payload:%s\n", q.Name,
|
||||
string(q.Payload))
|
||||
|
||||
var nid, ipStr string
|
||||
if _, err := fmt.Sscan(string(q.Payload), &nid, &ipStr); err != nil {
|
||||
fmt.Printf("Failed to scan query payload string: %v\n", err)
|
||||
}
|
||||
|
||||
peerMac, peerIPMask, vtep, err := d.peerDbSearch(nid, net.ParseIP(ipStr))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
q.Respond([]byte(fmt.Sprintf("%s %s %s", peerMac.String(), net.IP(peerIPMask).String(), vtep.String())))
|
||||
}
|
||||
|
||||
func (d *driver) resolvePeer(nid string, peerIP net.IP) (net.HardwareAddr, net.IPMask, net.IP, error) {
|
||||
if d.serfInstance == nil {
|
||||
return nil, nil, nil, fmt.Errorf("could not resolve peer: serf instance not initialized")
|
||||
}
|
||||
|
||||
qPayload := fmt.Sprintf("%s %s", string(nid), peerIP.String())
|
||||
resp, err := d.serfInstance.Query("peerlookup", []byte(qPayload), nil)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("resolving peer by querying the cluster failed: %v", err)
|
||||
}
|
||||
|
||||
respCh := resp.ResponseCh()
|
||||
select {
|
||||
case r := <-respCh:
|
||||
var macStr, maskStr, vtepStr string
|
||||
if _, err := fmt.Sscan(string(r.Payload), &macStr, &maskStr, &vtepStr); err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("bad response %q for the resolve query: %v", string(r.Payload), err)
|
||||
}
|
||||
|
||||
mac, err := net.ParseMAC(macStr)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("failed to parse mac: %v", err)
|
||||
}
|
||||
|
||||
return mac, net.IPMask(net.ParseIP(maskStr).To4()), net.ParseIP(vtepStr), nil
|
||||
|
||||
case <-time.After(time.Second):
|
||||
return nil, nil, nil, fmt.Errorf("timed out resolving peer by querying the cluster")
|
||||
}
|
||||
}
|
||||
|
||||
func (d *driver) startSerfLoop(eventCh chan serf.Event, notifyCh chan ovNotify,
|
||||
exitCh chan chan struct{}) {
|
||||
|
||||
for {
|
||||
select {
|
||||
case notify, ok := <-notifyCh:
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
d.notifyEvent(notify)
|
||||
case ch, ok := <-exitCh:
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
if err := d.serfInstance.Leave(); err != nil {
|
||||
logrus.Errorf("failed leaving the cluster: %v\n", err)
|
||||
}
|
||||
|
||||
d.serfInstance.Shutdown()
|
||||
close(ch)
|
||||
return
|
||||
case e, ok := <-eventCh:
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
if e.EventType() == serf.EventQuery {
|
||||
d.processQuery(e.(*serf.Query))
|
||||
break
|
||||
}
|
||||
|
||||
u, ok := e.(serf.UserEvent)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
d.processEvent(u)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *driver) isSerfAlive() bool {
|
||||
d.Lock()
|
||||
serfInstance := d.serfInstance
|
||||
d.Unlock()
|
||||
if serfInstance == nil || serfInstance.State() != serf.SerfAlive {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
61
vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_utils.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
package overlay

import (
"fmt"
"os/exec"
"strings"

"github.com/docker/libnetwork/osl"
)

func validateID(nid, eid string) error {
if nid == "" {
return fmt.Errorf("invalid network id")
}

if eid == "" {
return fmt.Errorf("invalid endpoint id")
}

return nil
}

func createVxlan(name string, vni uint32, mtu int) error {
defer osl.InitOSContext()()

// Get default interface to plumb the vxlan on
routeCmd := "/usr/sbin/ipadm show-addr -p -o addrobj " +
"`/usr/sbin/route get default | /usr/bin/grep interface | " +
"/usr/bin/awk '{print $2}'`"
out, err := exec.Command("/usr/bin/bash", "-c", routeCmd).Output()
if err != nil {
return fmt.Errorf("cannot get default route: %v", err)
}

defaultInterface := strings.SplitN(string(out), "/", 2)
propList := fmt.Sprintf("interface=%s,vni=%d", defaultInterface[0], vni)

out, err = exec.Command("/usr/sbin/dladm", "create-vxlan", "-t", "-p", propList,
name).Output()
if err != nil {
return fmt.Errorf("error creating vxlan interface: %v %s", err, out)
}

return nil
}

func deleteInterfaceBySubnet(brPrefix string, s *subnet) error {
return nil

}

func deleteInterface(name string) error {
defer osl.InitOSContext()()

out, err := exec.Command("/usr/sbin/dladm", "delete-vxlan", name).Output()
if err != nil {
return fmt.Errorf("error deleting vxlan interface: %v %s", err, out)
}

return nil
}
362
vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.go
generated
vendored
Normal file
@ -0,0 +1,362 @@
|
|||
package overlay
|
||||
|
||||
//go:generate protoc -I.:../../Godeps/_workspace/src/github.com/gogo/protobuf --gogo_out=import_path=github.com/docker/libnetwork/drivers/overlay,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. overlay.proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/datastore"
|
||||
"github.com/docker/libnetwork/discoverapi"
|
||||
"github.com/docker/libnetwork/driverapi"
|
||||
"github.com/docker/libnetwork/idm"
|
||||
"github.com/docker/libnetwork/netlabel"
|
||||
"github.com/docker/libnetwork/osl"
|
||||
"github.com/docker/libnetwork/types"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
)
|
||||
|
||||
// XXX OVERLAY_SOLARIS
|
||||
// Might need changes for names/constant values in solaris
|
||||
const (
|
||||
networkType = "overlay"
|
||||
vethPrefix = "veth"
|
||||
vethLen = 7
|
||||
vxlanIDStart = 256
|
||||
vxlanIDEnd = (1 << 24) - 1
|
||||
vxlanPort = 4789
|
||||
vxlanEncap = 50
|
||||
secureOption = "encrypted"
|
||||
)
|
||||
|
||||
var initVxlanIdm = make(chan (bool), 1)
|
||||
|
||||
type driver struct {
|
||||
eventCh chan serf.Event
|
||||
notifyCh chan ovNotify
|
||||
exitCh chan chan struct{}
|
||||
bindAddress string
|
||||
advertiseAddress string
|
||||
neighIP string
|
||||
config map[string]interface{}
|
||||
peerDb peerNetworkMap
|
||||
secMap *encrMap
|
||||
serfInstance *serf.Serf
|
||||
networks networkTable
|
||||
store datastore.DataStore
|
||||
localStore datastore.DataStore
|
||||
vxlanIdm *idm.Idm
|
||||
once sync.Once
|
||||
joinOnce sync.Once
|
||||
keys []*key
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// Init registers a new instance of overlay driver
|
||||
func Init(dc driverapi.DriverCallback, config map[string]interface{}) error {
|
||||
c := driverapi.Capability{
|
||||
DataScope: datastore.GlobalScope,
|
||||
}
|
||||
d := &driver{
|
||||
networks: networkTable{},
|
||||
peerDb: peerNetworkMap{
|
||||
mp: map[string]*peerMap{},
|
||||
},
|
||||
secMap: &encrMap{nodes: map[string][]*spi{}},
|
||||
config: config,
|
||||
}
|
||||
|
||||
if data, ok := config[netlabel.GlobalKVClient]; ok {
|
||||
var err error
|
||||
dsc, ok := data.(discoverapi.DatastoreConfigData)
|
||||
if !ok {
|
||||
return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
|
||||
}
|
||||
d.store, err = datastore.NewDataStoreFromConfig(dsc)
|
||||
if err != nil {
|
||||
return types.InternalErrorf("failed to initialize data store: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if data, ok := config[netlabel.LocalKVClient]; ok {
|
||||
var err error
|
||||
dsc, ok := data.(discoverapi.DatastoreConfigData)
|
||||
if !ok {
|
||||
return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
|
||||
}
|
||||
d.localStore, err = datastore.NewDataStoreFromConfig(dsc)
|
||||
if err != nil {
|
||||
return types.InternalErrorf("failed to initialize local data store: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
d.restoreEndpoints()
|
||||
|
||||
return dc.RegisterDriver(networkType, d, c)
|
||||
}
|
||||
|
||||
// Endpoints are stored in the local store. Restore them and reconstruct the overlay sandbox
|
||||
func (d *driver) restoreEndpoints() error {
|
||||
if d.localStore == nil {
|
||||
logrus.Warnf("Cannot restore overlay endpoints because local datastore is missing")
|
||||
return nil
|
||||
}
|
||||
kvol, err := d.localStore.List(datastore.Key(overlayEndpointPrefix), &endpoint{})
|
||||
if err != nil && err != datastore.ErrKeyNotFound {
|
||||
return fmt.Errorf("failed to read overlay endpoint from store: %v", err)
|
||||
}
|
||||
|
||||
if err == datastore.ErrKeyNotFound {
|
||||
return nil
|
||||
}
|
||||
for _, kvo := range kvol {
|
||||
ep := kvo.(*endpoint)
|
||||
n := d.network(ep.nid)
|
||||
if n == nil {
|
||||
logrus.Debugf("Network (%s) not found for restored endpoint (%s)", ep.nid[0:7], ep.id[0:7])
|
||||
logrus.Debugf("Deleting stale overlay endpoint (%s) from store", ep.id[0:7])
|
||||
if err := d.deleteEndpointFromStore(ep); err != nil {
|
||||
logrus.Debugf("Failed to delete stale overlay endpoint (%s) from store", ep.id[0:7])
|
||||
}
|
||||
continue
|
||||
}
|
||||
n.addEndpoint(ep)
|
||||
|
||||
s := n.getSubnetforIP(ep.addr)
|
||||
if s == nil {
|
||||
return fmt.Errorf("could not find subnet for endpoint %s", ep.id)
|
||||
}
|
||||
|
||||
if err := n.joinSandbox(true); err != nil {
|
||||
return fmt.Errorf("restore network sandbox failed: %v", err)
|
||||
}
|
||||
|
||||
if err := n.joinSubnetSandbox(s, true); err != nil {
|
||||
return fmt.Errorf("restore subnet sandbox failed for %q: %v", s.subnetIP.String(), err)
|
||||
}
|
||||
|
||||
Ifaces := make(map[string][]osl.IfaceOption)
|
||||
vethIfaceOption := make([]osl.IfaceOption, 1)
|
||||
vethIfaceOption = append(vethIfaceOption, n.sbox.InterfaceOptions().Master(s.brName))
|
||||
Ifaces[fmt.Sprintf("%s+%s", "veth", "veth")] = vethIfaceOption
|
||||
|
||||
err := n.sbox.Restore(Ifaces, nil, nil, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to restore overlay sandbox: %v", err)
|
||||
}
|
||||
|
||||
n.incEndpointCount()
|
||||
d.peerDbAdd(ep.nid, ep.id, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), true)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fini cleans up the driver resources
|
||||
func Fini(drv driverapi.Driver) {
|
||||
d := drv.(*driver)
|
||||
|
||||
if d.exitCh != nil {
|
||||
waitCh := make(chan struct{})
|
||||
|
||||
d.exitCh <- waitCh
|
||||
|
||||
<-waitCh
|
||||
}
|
||||
}
|
||||
|
||||
func (d *driver) configure() error {
|
||||
if d.store == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if d.vxlanIdm == nil {
|
||||
return d.initializeVxlanIdm()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) initializeVxlanIdm() error {
|
||||
var err error
|
||||
|
||||
initVxlanIdm <- true
|
||||
defer func() { <-initVxlanIdm }()
|
||||
|
||||
if d.vxlanIdm != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
d.vxlanIdm, err = idm.New(d.store, "vxlan-id", vxlanIDStart, vxlanIDEnd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize vxlan id manager: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) Type() string {
|
||||
return networkType
|
||||
}
|
||||
|
||||
func validateSelf(node string) error {
|
||||
advIP := net.ParseIP(node)
|
||||
if advIP == nil {
|
||||
return fmt.Errorf("invalid self address (%s)", node)
|
||||
}
|
||||
|
||||
addrs, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to get interface addresses %v", err)
|
||||
}
|
||||
for _, addr := range addrs {
|
||||
ip, _, err := net.ParseCIDR(addr.String())
|
||||
if err == nil && ip.Equal(advIP) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("Multi-Host overlay networking requires cluster-advertise(%s) to be configured with a local ip-address that is reachable within the cluster", advIP.String())
|
||||
}
|
||||
|
||||
func (d *driver) nodeJoin(advertiseAddress, bindAddress string, self bool) {
|
||||
if self && !d.isSerfAlive() {
|
||||
d.Lock()
|
||||
d.advertiseAddress = advertiseAddress
|
||||
d.bindAddress = bindAddress
|
||||
d.Unlock()
|
||||
|
||||
// If there is no cluster store there is no need to start serf.
|
||||
if d.store != nil {
|
||||
if err := validateSelf(advertiseAddress); err != nil {
|
||||
logrus.Warnf("%s", err.Error())
|
||||
}
|
||||
err := d.serfInit()
|
||||
if err != nil {
|
||||
logrus.Errorf("initializing serf instance failed: %v", err)
|
||||
d.Lock()
|
||||
d.advertiseAddress = ""
|
||||
d.bindAddress = ""
|
||||
d.Unlock()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
d.Lock()
|
||||
if !self {
|
||||
d.neighIP = advertiseAddress
|
||||
}
|
||||
neighIP := d.neighIP
|
||||
d.Unlock()
|
||||
|
||||
if d.serfInstance != nil && neighIP != "" {
|
||||
var err error
|
||||
d.joinOnce.Do(func() {
|
||||
err = d.serfJoin(neighIP)
|
||||
if err == nil {
|
||||
d.pushLocalDb()
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
logrus.Errorf("joining serf neighbor %s failed: %v", advertiseAddress, err)
|
||||
d.Lock()
|
||||
d.joinOnce = sync.Once{}
|
||||
d.Unlock()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *driver) pushLocalEndpointEvent(action, nid, eid string) {
|
||||
n := d.network(nid)
|
||||
if n == nil {
|
||||
logrus.Debugf("Error pushing local endpoint event for network %s", nid)
|
||||
return
|
||||
}
|
||||
ep := n.endpoint(eid)
|
||||
if ep == nil {
|
||||
logrus.Debugf("Error pushing local endpoint event for ep %s / %s", nid, eid)
|
||||
return
|
||||
}
|
||||
|
||||
if !d.isSerfAlive() {
|
||||
return
|
||||
}
|
||||
d.notifyCh <- ovNotify{
|
||||
action: "join",
|
||||
nw: n,
|
||||
ep: ep,
|
||||
}
|
||||
}
|
||||
|
||||
// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster
|
||||
func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error {
|
||||
var err error
|
||||
switch dType {
|
||||
case discoverapi.NodeDiscovery:
|
||||
nodeData, ok := data.(discoverapi.NodeDiscoveryData)
|
||||
if !ok || nodeData.Address == "" {
|
||||
return fmt.Errorf("invalid discovery data")
|
||||
}
|
||||
d.nodeJoin(nodeData.Address, nodeData.BindAddress, nodeData.Self)
|
||||
case discoverapi.DatastoreConfig:
|
||||
if d.store != nil {
|
||||
return types.ForbiddenErrorf("cannot accept datastore configuration: Overlay driver has a datastore configured already")
|
||||
}
|
||||
dsc, ok := data.(discoverapi.DatastoreConfigData)
|
||||
if !ok {
|
||||
return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
|
||||
}
|
||||
d.store, err = datastore.NewDataStoreFromConfig(dsc)
|
||||
if err != nil {
|
||||
return types.InternalErrorf("failed to initialize data store: %v", err)
|
||||
}
|
||||
case discoverapi.EncryptionKeysConfig:
|
||||
encrData, ok := data.(discoverapi.DriverEncryptionConfig)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid encryption key notification data")
|
||||
}
|
||||
keys := make([]*key, 0, len(encrData.Keys))
|
||||
for i := 0; i < len(encrData.Keys); i++ {
|
||||
k := &key{
|
||||
value: encrData.Keys[i],
|
||||
tag: uint32(encrData.Tags[i]),
|
||||
}
|
||||
keys = append(keys, k)
|
||||
}
|
||||
d.setKeys(keys)
|
||||
case discoverapi.EncryptionKeysUpdate:
|
||||
var newKey, delKey, priKey *key
|
||||
encrData, ok := data.(discoverapi.DriverEncryptionUpdate)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid encryption key notification data")
|
||||
}
|
||||
if encrData.Key != nil {
|
||||
newKey = &key{
|
||||
value: encrData.Key,
|
||||
tag: uint32(encrData.Tag),
|
||||
}
|
||||
}
|
||||
if encrData.Primary != nil {
|
||||
priKey = &key{
|
||||
value: encrData.Primary,
|
||||
tag: uint32(encrData.PrimaryTag),
|
||||
}
|
||||
}
|
||||
if encrData.Prune != nil {
|
||||
delKey = &key{
|
||||
value: encrData.Prune,
|
||||
tag: uint32(encrData.PruneTag),
|
||||
}
|
||||
}
|
||||
d.updateKeys(newKey, priKey, delKey)
|
||||
default:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster
|
||||
func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error {
|
||||
return nil
|
||||
}
|
468
vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.pb.go
generated
vendored
Normal file
@ -0,0 +1,468 @@
|
|||
// Code generated by protoc-gen-gogo.
|
||||
// source: overlay.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package overlay is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
overlay.proto
|
||||
|
||||
It has these top-level messages:
|
||||
PeerRecord
|
||||
*/
|
||||
package overlay
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import _ "github.com/gogo/protobuf/gogoproto"
|
||||
|
||||
import strings "strings"
|
||||
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
|
||||
import sort "sort"
|
||||
import strconv "strconv"
|
||||
import reflect "reflect"
|
||||
|
||||
import io "io"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
const _ = proto.GoGoProtoPackageIsVersion1
|
||||
|
||||
// PeerRecord defines the information corresponding to a peer
|
||||
// container in the overlay network.
|
||||
type PeerRecord struct {
|
||||
// Endpoint IP is the IP of the container attachment on the
|
||||
// given overlay network.
|
||||
EndpointIP string `protobuf:"bytes,1,opt,name=endpoint_ip,json=endpointIp,proto3" json:"endpoint_ip,omitempty"`
|
||||
// Endpoint MAC is the mac address of the container attachment
|
||||
// on the given overlay network.
|
||||
EndpointMAC string `protobuf:"bytes,2,opt,name=endpoint_mac,json=endpointMac,proto3" json:"endpoint_mac,omitempty"`
|
||||
// Tunnel Endpoint IP defines the host IP for the host in
|
||||
// which this container is running and can be reached by
|
||||
// building a tunnel to that host IP.
|
||||
TunnelEndpointIP string `protobuf:"bytes,3,opt,name=tunnel_endpoint_ip,json=tunnelEndpointIp,proto3" json:"tunnel_endpoint_ip,omitempty"`
|
||||
}
|
||||
|
||||
func (m *PeerRecord) Reset() { *m = PeerRecord{} }
|
||||
func (*PeerRecord) ProtoMessage() {}
|
||||
func (*PeerRecord) Descriptor() ([]byte, []int) { return fileDescriptorOverlay, []int{0} }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*PeerRecord)(nil), "overlay.PeerRecord")
|
||||
}
|
||||
func (this *PeerRecord) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&overlay.PeerRecord{")
|
||||
s = append(s, "EndpointIP: "+fmt.Sprintf("%#v", this.EndpointIP)+",\n")
|
||||
s = append(s, "EndpointMAC: "+fmt.Sprintf("%#v", this.EndpointMAC)+",\n")
|
||||
s = append(s, "TunnelEndpointIP: "+fmt.Sprintf("%#v", this.TunnelEndpointIP)+",\n")
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringOverlay(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func extensionToGoStringOverlay(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
|
||||
if e == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := "map[int32]proto.Extension{"
|
||||
keys := make([]int, 0, len(e))
|
||||
for k := range e {
|
||||
keys = append(keys, int(k))
|
||||
}
|
||||
sort.Ints(keys)
|
||||
ss := []string{}
|
||||
for _, k := range keys {
|
||||
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
|
||||
}
|
||||
s += strings.Join(ss, ",") + "}"
|
||||
return s
|
||||
}
|
||||
func (m *PeerRecord) Marshal() (data []byte, err error) {
|
||||
size := m.Size()
|
||||
data = make([]byte, size)
|
||||
n, err := m.MarshalTo(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data[:n], nil
|
||||
}
|
||||
|
||||
func (m *PeerRecord) MarshalTo(data []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.EndpointIP) > 0 {
|
||||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintOverlay(data, i, uint64(len(m.EndpointIP)))
|
||||
i += copy(data[i:], m.EndpointIP)
|
||||
}
|
||||
if len(m.EndpointMAC) > 0 {
|
||||
data[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintOverlay(data, i, uint64(len(m.EndpointMAC)))
|
||||
i += copy(data[i:], m.EndpointMAC)
|
||||
}
|
||||
if len(m.TunnelEndpointIP) > 0 {
|
||||
data[i] = 0x1a
|
||||
i++
|
||||
i = encodeVarintOverlay(data, i, uint64(len(m.TunnelEndpointIP)))
|
||||
i += copy(data[i:], m.TunnelEndpointIP)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Overlay(data []byte, offset int, v uint64) int {
|
||||
data[offset] = uint8(v)
|
||||
data[offset+1] = uint8(v >> 8)
|
||||
data[offset+2] = uint8(v >> 16)
|
||||
data[offset+3] = uint8(v >> 24)
|
||||
data[offset+4] = uint8(v >> 32)
|
||||
data[offset+5] = uint8(v >> 40)
|
||||
data[offset+6] = uint8(v >> 48)
|
||||
data[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Overlay(data []byte, offset int, v uint32) int {
|
||||
data[offset] = uint8(v)
|
||||
data[offset+1] = uint8(v >> 8)
|
||||
data[offset+2] = uint8(v >> 16)
|
||||
data[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintOverlay(data []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
data[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
data[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func (m *PeerRecord) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.EndpointIP)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovOverlay(uint64(l))
|
||||
}
|
||||
l = len(m.EndpointMAC)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovOverlay(uint64(l))
|
||||
}
|
||||
l = len(m.TunnelEndpointIP)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovOverlay(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovOverlay(x uint64) (n int) {
|
||||
for {
|
||||
n++
|
||||
x >>= 7
|
||||
if x == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
func sozOverlay(x uint64) (n int) {
|
||||
return sovOverlay(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (this *PeerRecord) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&PeerRecord{`,
|
||||
`EndpointIP:` + fmt.Sprintf("%v", this.EndpointIP) + `,`,
|
||||
`EndpointMAC:` + fmt.Sprintf("%v", this.EndpointMAC) + `,`,
|
||||
`TunnelEndpointIP:` + fmt.Sprintf("%v", this.TunnelEndpointIP) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func valueToStringOverlay(v interface{}) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("*%v", pv)
|
||||
}
|
||||
func (m *PeerRecord) Unmarshal(data []byte) error {
|
||||
l := len(data)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: PeerRecord: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: PeerRecord: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field EndpointIP", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthOverlay
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.EndpointIP = string(data[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field EndpointMAC", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthOverlay
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.EndpointMAC = string(data[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field TunnelEndpointIP", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthOverlay
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.TunnelEndpointIP = string(data[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipOverlay(data[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthOverlay
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipOverlay(data []byte) (n int, err error) {
|
||||
l := len(data)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if data[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
return iNdEx, nil
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthOverlay
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
var innerWire uint64
|
||||
var start int = iNdEx
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
innerWireType := int(innerWire & 0x7)
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipOverlay(data[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
return iNdEx, nil
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
return iNdEx, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthOverlay = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowOverlay = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
var fileDescriptorOverlay = []byte{
|
||||
// 195 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xcd, 0x2f, 0x4b, 0x2d,
|
||||
0xca, 0x49, 0xac, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x87, 0x72, 0xa5, 0x44, 0xd2,
|
||||
0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x69, 0x2b, 0x23, 0x17, 0x57, 0x40,
|
||||
0x6a, 0x6a, 0x51, 0x50, 0x6a, 0x72, 0x7e, 0x51, 0x8a, 0x90, 0x3e, 0x17, 0x77, 0x6a, 0x5e, 0x4a,
|
||||
0x41, 0x7e, 0x66, 0x5e, 0x49, 0x7c, 0x66, 0x81, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x13, 0xdf,
|
||||
0xa3, 0x7b, 0xf2, 0x5c, 0xae, 0x50, 0x61, 0xcf, 0x80, 0x20, 0x2e, 0x98, 0x12, 0xcf, 0x02, 0x21,
|
||||
0x23, 0x2e, 0x1e, 0xb8, 0x86, 0xdc, 0xc4, 0x64, 0x09, 0x26, 0xb0, 0x0e, 0x7e, 0xa0, 0x0e, 0x6e,
|
||||
0x98, 0x0e, 0x5f, 0x47, 0xe7, 0x20, 0xb8, 0xa9, 0xbe, 0x89, 0xc9, 0x42, 0x4e, 0x5c, 0x42, 0x25,
|
||||
0xa5, 0x79, 0x79, 0xa9, 0x39, 0xf1, 0xc8, 0x76, 0x31, 0x83, 0x75, 0x8a, 0x00, 0x75, 0x0a, 0x84,
|
||||
0x80, 0x65, 0x91, 0x6c, 0x14, 0x28, 0x41, 0x15, 0x29, 0x70, 0x92, 0xb8, 0xf1, 0x50, 0x8e, 0xe1,
|
||||
0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x80, 0xf8, 0x02, 0x10, 0x3f, 0x00, 0xe2,
|
||||
0x24, 0x36, 0xb0, 0xc7, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xd7, 0x7d, 0x7d, 0x08,
|
||||
0x01, 0x00, 0x00,
|
||||
}
|
27 vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.proto generated vendored Normal file

@@ -0,0 +1,27 @@
syntax = "proto3";

import "gogoproto/gogo.proto";

package overlay;

option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.stringer_all) = true;
option (gogoproto.gostring_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.goproto_stringer_all) = false;

// PeerRecord defines the information corresponding to a peer
// container in the overlay network.
message PeerRecord {
	// Endpoint IP is the IP of the container attachment on the
	// given overlay network.
	string endpoint_ip = 1 [(gogoproto.customname) = "EndpointIP"];
	// Endpoint MAC is the mac address of the container attachment
	// on the given overlay network.
	string endpoint_mac = 2 [(gogoproto.customname) = "EndpointMAC"];
	// Tunnel Endpoint IP defines the host IP for the host in
	// which this container is running and can be reached by
	// building a tunnel to that host IP.
	string tunnel_endpoint_ip = 3 [(gogoproto.customname) = "TunnelEndpointIP"];
}
336 vendor/github.com/docker/libnetwork/drivers/solaris/overlay/peerdb.go generated vendored Normal file

@@ -0,0 +1,336 @@
package overlay
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
const ovPeerTable = "overlay_peer_table"
|
||||
|
||||
type peerKey struct {
|
||||
peerIP net.IP
|
||||
peerMac net.HardwareAddr
|
||||
}
|
||||
|
||||
type peerEntry struct {
|
||||
eid string
|
||||
vtep net.IP
|
||||
peerIPMask net.IPMask
|
||||
inSandbox bool
|
||||
isLocal bool
|
||||
}
|
||||
|
||||
type peerMap struct {
|
||||
mp map[string]peerEntry
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
type peerNetworkMap struct {
|
||||
mp map[string]*peerMap
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func (pKey peerKey) String() string {
|
||||
return fmt.Sprintf("%s %s", pKey.peerIP, pKey.peerMac)
|
||||
}
|
||||
|
||||
func (pKey *peerKey) Scan(state fmt.ScanState, verb rune) error {
|
||||
ipB, err := state.Token(true, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pKey.peerIP = net.ParseIP(string(ipB))
|
||||
|
||||
macB, err := state.Token(true, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pKey.peerMac, err = net.ParseMAC(string(macB))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var peerDbWg sync.WaitGroup
|
||||
|
||||
func (d *driver) peerDbWalk(f func(string, *peerKey, *peerEntry) bool) error {
|
||||
d.peerDb.Lock()
|
||||
nids := []string{}
|
||||
for nid := range d.peerDb.mp {
|
||||
nids = append(nids, nid)
|
||||
}
|
||||
d.peerDb.Unlock()
|
||||
|
||||
for _, nid := range nids {
|
||||
d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
|
||||
return f(nid, pKey, pEntry)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) peerDbNetworkWalk(nid string, f func(*peerKey, *peerEntry) bool) error {
|
||||
d.peerDb.Lock()
|
||||
pMap, ok := d.peerDb.mp[nid]
|
||||
if !ok {
|
||||
d.peerDb.Unlock()
|
||||
return nil
|
||||
}
|
||||
d.peerDb.Unlock()
|
||||
|
||||
pMap.Lock()
|
||||
for pKeyStr, pEntry := range pMap.mp {
|
||||
var pKey peerKey
|
||||
if _, err := fmt.Sscan(pKeyStr, &pKey); err != nil {
|
||||
logrus.Warnf("Peer key scan on network %s failed: %v", nid, err)
|
||||
}
|
||||
|
||||
if f(&pKey, &pEntry) {
|
||||
pMap.Unlock()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
pMap.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
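// peerDbSearch scans the peer database for network nid and returns the MAC
// address, IP mask and VTEP of the entry whose peer IP matches peerIP.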
func (d *driver) peerDbSearch(nid string, peerIP net.IP) (net.HardwareAddr, net.IPMask, net.IP, error) {
|
||||
var (
|
||||
peerMac net.HardwareAddr
|
||||
vtep net.IP
|
||||
peerIPMask net.IPMask
|
||||
found bool
|
||||
)
|
||||
|
||||
err := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
|
||||
if pKey.peerIP.Equal(peerIP) {
|
||||
peerMac = pKey.peerMac
|
||||
peerIPMask = pEntry.peerIPMask
|
||||
vtep = pEntry.vtep
|
||||
found = true
|
||||
return found
|
||||
}
|
||||
|
||||
return found
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("peerdb search for peer ip %q failed: %v", peerIP, err)
|
||||
}
|
||||
|
||||
if !found {
|
||||
return nil, nil, nil, fmt.Errorf("peer ip %q not found in peerdb", peerIP)
|
||||
}
|
||||
|
||||
return peerMac, peerIPMask, vtep, nil
|
||||
}
|
||||
|
||||
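// peerDbAdd records a peer entry, keyed by peer IP and MAC, for network nid
// in the in-memory peer database.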
func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
|
||||
peerMac net.HardwareAddr, vtep net.IP, isLocal bool) {
|
||||
|
||||
peerDbWg.Wait()
|
||||
|
||||
d.peerDb.Lock()
|
||||
pMap, ok := d.peerDb.mp[nid]
|
||||
if !ok {
|
||||
d.peerDb.mp[nid] = &peerMap{
|
||||
mp: make(map[string]peerEntry),
|
||||
}
|
||||
|
||||
pMap = d.peerDb.mp[nid]
|
||||
}
|
||||
d.peerDb.Unlock()
|
||||
|
||||
pKey := peerKey{
|
||||
peerIP: peerIP,
|
||||
peerMac: peerMac,
|
||||
}
|
||||
|
||||
pEntry := peerEntry{
|
||||
eid: eid,
|
||||
vtep: vtep,
|
||||
peerIPMask: peerIPMask,
|
||||
isLocal: isLocal,
|
||||
}
|
||||
|
||||
pMap.Lock()
|
||||
pMap.mp[pKey.String()] = pEntry
|
||||
pMap.Unlock()
|
||||
}
|
||||
|
||||
func (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
|
||||
peerMac net.HardwareAddr, vtep net.IP) {
|
||||
peerDbWg.Wait()
|
||||
|
||||
d.peerDb.Lock()
|
||||
pMap, ok := d.peerDb.mp[nid]
|
||||
if !ok {
|
||||
d.peerDb.Unlock()
|
||||
return
|
||||
}
|
||||
d.peerDb.Unlock()
|
||||
|
||||
pKey := peerKey{
|
||||
peerIP: peerIP,
|
||||
peerMac: peerMac,
|
||||
}
|
||||
|
||||
pMap.Lock()
|
||||
delete(pMap.mp, pKey.String())
|
||||
pMap.Unlock()
|
||||
}
|
||||
|
||||
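// peerDbUpdateSandbox replays all remote (non-local) peer entries recorded
// for network nid into the network sandbox via peerAdd.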
func (d *driver) peerDbUpdateSandbox(nid string) {
|
||||
d.peerDb.Lock()
|
||||
pMap, ok := d.peerDb.mp[nid]
|
||||
if !ok {
|
||||
d.peerDb.Unlock()
|
||||
return
|
||||
}
|
||||
d.peerDb.Unlock()
|
||||
|
||||
peerDbWg.Add(1)
|
||||
|
||||
var peerOps []func()
|
||||
pMap.Lock()
|
||||
for pKeyStr, pEntry := range pMap.mp {
|
||||
var pKey peerKey
|
||||
if _, err := fmt.Sscan(pKeyStr, &pKey); err != nil {
|
||||
fmt.Printf("peer key scan failed: %v", err)
|
||||
}
|
||||
|
||||
if pEntry.isLocal {
|
||||
continue
|
||||
}
|
||||
|
||||
// Go captures variables by reference. The pEntry could be
|
||||
// pointing to the same memory location for every iteration. Make
|
||||
// a copy of pEntry before capturing it in the following closure.
|
||||
entry := pEntry
|
||||
op := func() {
|
||||
if err := d.peerAdd(nid, entry.eid, pKey.peerIP, entry.peerIPMask,
|
||||
pKey.peerMac, entry.vtep,
|
||||
false); err != nil {
|
||||
fmt.Printf("peerdbupdate in sandbox failed for ip %s and mac %s: %v",
|
||||
pKey.peerIP, pKey.peerMac, err)
|
||||
}
|
||||
}
|
||||
|
||||
peerOps = append(peerOps, op)
|
||||
}
|
||||
pMap.Unlock()
|
||||
|
||||
for _, op := range peerOps {
|
||||
op()
|
||||
}
|
||||
|
||||
peerDbWg.Done()
|
||||
}
|
||||
|
||||
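// peerAdd programs a remote peer into the network sandbox (vxlan id, subnet
// sandbox join, optional encryption check, neighbor entry); when updateDb is
// true the peer is first recorded in the peer database.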
func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
|
||||
peerMac net.HardwareAddr, vtep net.IP, updateDb bool) error {
|
||||
|
||||
if err := validateID(nid, eid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if updateDb {
|
||||
d.peerDbAdd(nid, eid, peerIP, peerIPMask, peerMac, vtep, false)
|
||||
}
|
||||
|
||||
n := d.network(nid)
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
sbox := n.sandbox()
|
||||
if sbox == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
IP := &net.IPNet{
|
||||
IP: peerIP,
|
||||
Mask: peerIPMask,
|
||||
}
|
||||
|
||||
s := n.getSubnetforIP(IP)
|
||||
if s == nil {
|
||||
return fmt.Errorf("couldn't find the subnet %q in network %q\n", IP.String(), n.id)
|
||||
}
|
||||
|
||||
if err := n.obtainVxlanID(s); err != nil {
|
||||
return fmt.Errorf("couldn't get vxlan id for %q: %v", s.subnetIP.String(), err)
|
||||
}
|
||||
|
||||
if err := n.joinSubnetSandbox(s, false); err != nil {
|
||||
return fmt.Errorf("subnet sandbox join failed for %q: %v", s.subnetIP.String(), err)
|
||||
}
|
||||
|
||||
if err := d.checkEncryption(nid, vtep, n.vxlanID(s), false, true); err != nil {
|
||||
logrus.Warn(err)
|
||||
}
|
||||
|
||||
// Add neighbor entry for the peer IP
|
||||
if err := sbox.AddNeighbor(peerIP, peerMac, sbox.NeighborOptions().LinkName(s.vxlanName)); err != nil {
|
||||
return fmt.Errorf("could not add neigbor entry into the sandbox: %v", err)
|
||||
}
|
||||
|
||||
// XXX Add fdb entry to the bridge for the peer mac
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
|
||||
peerMac net.HardwareAddr, vtep net.IP, updateDb bool) error {
|
||||
|
||||
if err := validateID(nid, eid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if updateDb {
|
||||
d.peerDbDelete(nid, eid, peerIP, peerIPMask, peerMac, vtep)
|
||||
}
|
||||
|
||||
n := d.network(nid)
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
sbox := n.sandbox()
|
||||
if sbox == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete fdb entry to the bridge for the peer mac
|
||||
if err := sbox.DeleteNeighbor(vtep, peerMac, true); err != nil {
|
||||
return fmt.Errorf("could not delete fdb entry into the sandbox: %v", err)
|
||||
}
|
||||
|
||||
// Delete neighbor entry for the peer IP
|
||||
if err := sbox.DeleteNeighbor(peerIP, peerMac, true); err != nil {
|
||||
return fmt.Errorf("could not delete neigbor entry into the sandbox: %v", err)
|
||||
}
|
||||
|
||||
if err := d.checkEncryption(nid, vtep, 0, false, false); err != nil {
|
||||
logrus.Warn(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) pushLocalDb() {
|
||||
d.peerDbWalk(func(nid string, pKey *peerKey, pEntry *peerEntry) bool {
|
||||
if pEntry.isLocal {
|
||||
d.pushLocalEndpointEvent("join", nid, pEntry.eid)
|
||||
}
|
||||
return false
|
||||
})
|
||||
}
|
6 vendor/github.com/docker/libnetwork/drivers/windows/labels.go generated vendored

@@ -30,4 +30,10 @@ const (
|
||||
	// SourceMac of the network
	SourceMac = "com.docker.network.windowsshim.sourcemac"

	// DisableICC label
	DisableICC = "com.docker.network.windowsshim.disableicc"

	// DisableDNS label
	DisableDNS = "com.docker.network.windowsshim.disable_dns"
)
112 vendor/github.com/docker/libnetwork/drivers/windows/overlay/joinleave_windows.go generated vendored Normal file

@@ -0,0 +1,112 @@
package overlay
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/driverapi"
|
||||
"github.com/docker/libnetwork/types"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
// Join method is invoked when a Sandbox is attached to an endpoint.
|
||||
func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
|
||||
if err := validateID(nid, eid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n := d.network(nid)
|
||||
if n == nil {
|
||||
return fmt.Errorf("could not find network with id %s", nid)
|
||||
}
|
||||
|
||||
ep := n.endpoint(eid)
|
||||
if ep == nil {
|
||||
return fmt.Errorf("could not find endpoint with id %s", eid)
|
||||
}
|
||||
|
||||
if err := d.writeEndpointToStore(ep); err != nil {
|
||||
return fmt.Errorf("failed to update overlay endpoint %s to local data store: %v", ep.id[0:7], err)
|
||||
}
|
||||
|
||||
buf, err := proto.Marshal(&PeerRecord{
|
||||
EndpointIP: ep.addr.String(),
|
||||
EndpointMAC: ep.mac.String(),
|
||||
TunnelEndpointIP: n.providerAddress,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := jinfo.AddTableEntry(ovPeerTable, eid, buf); err != nil {
|
||||
logrus.Errorf("overlay: Failed adding table entry to joininfo: %v", err)
|
||||
}
|
||||
|
||||
d.pushLocalEndpointEvent("join", nid, eid)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) {
|
||||
if tableName != ovPeerTable {
|
||||
logrus.Errorf("Unexpected table notification for table %s received", tableName)
|
||||
return
|
||||
}
|
||||
|
||||
eid := key
|
||||
|
||||
var peer PeerRecord
|
||||
if err := proto.Unmarshal(value, &peer); err != nil {
|
||||
logrus.Errorf("Failed to unmarshal peer record: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
n := d.network(nid)
|
||||
if n == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Ignore local peers. We already know about them and they
|
||||
// should not be added to vxlan fdb.
|
||||
if peer.TunnelEndpointIP == n.providerAddress {
|
||||
return
|
||||
}
|
||||
|
||||
addr, err := types.ParseCIDR(peer.EndpointIP)
|
||||
if err != nil {
|
||||
logrus.Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP)
|
||||
return
|
||||
}
|
||||
|
||||
mac, err := net.ParseMAC(peer.EndpointMAC)
|
||||
if err != nil {
|
||||
logrus.Errorf("Invalid mac %s received in event notify", peer.EndpointMAC)
|
||||
return
|
||||
}
|
||||
|
||||
vtep := net.ParseIP(peer.TunnelEndpointIP)
|
||||
if vtep == nil {
|
||||
logrus.Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP)
|
||||
return
|
||||
}
|
||||
|
||||
if etype == driverapi.Delete {
|
||||
d.peerDelete(nid, eid, addr.IP, addr.Mask, mac, vtep, true)
|
||||
return
|
||||
}
|
||||
|
||||
d.peerAdd(nid, eid, addr.IP, addr.Mask, mac, vtep, true)
|
||||
}
|
||||
|
||||
// Leave method is invoked when a Sandbox detaches from an endpoint.
|
||||
func (d *driver) Leave(nid, eid string) error {
|
||||
if err := validateID(nid, eid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.pushLocalEndpointEvent("leave", nid, eid)
|
||||
|
||||
return nil
|
||||
}
|
346 vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go generated vendored Normal file

@@ -0,0 +1,346 @@
package overlay
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/datastore"
|
||||
"github.com/docker/libnetwork/driverapi"
|
||||
"github.com/docker/libnetwork/types"
|
||||
)
|
||||
|
||||
type endpointTable map[string]*endpoint
|
||||
|
||||
const overlayEndpointPrefix = "overlay/endpoint"
|
||||
|
||||
type endpoint struct {
|
||||
id string
|
||||
nid string
|
||||
profileId string
|
||||
remote bool
|
||||
mac net.HardwareAddr
|
||||
addr *net.IPNet
|
||||
dbExists bool
|
||||
dbIndex uint64
|
||||
}
|
||||
|
||||
func validateID(nid, eid string) error {
|
||||
if nid == "" {
|
||||
return fmt.Errorf("invalid network id")
|
||||
}
|
||||
|
||||
if eid == "" {
|
||||
return fmt.Errorf("invalid endpoint id")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *network) endpoint(eid string) *endpoint {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
|
||||
return n.endpoints[eid]
|
||||
}
|
||||
|
||||
func (n *network) addEndpoint(ep *endpoint) {
|
||||
n.Lock()
|
||||
n.endpoints[ep.id] = ep
|
||||
n.Unlock()
|
||||
}
|
||||
|
||||
func (n *network) deleteEndpoint(eid string) {
|
||||
n.Lock()
|
||||
delete(n.endpoints, eid)
|
||||
n.Unlock()
|
||||
}
|
||||
|
||||
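// removeEndpointWithAddress removes any endpoint whose IP matches addr from
// the network's endpoint table and cleans up its stale HNS endpoint and
// local-store record.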
func (n *network) removeEndpointWithAddress(addr *net.IPNet) {
|
||||
var networkEndpoint *endpoint
|
||||
n.Lock()
|
||||
for _, ep := range n.endpoints {
|
||||
if ep.addr.IP.Equal(addr.IP) {
|
||||
networkEndpoint = ep
|
||||
break
|
||||
}
|
||||
}
|
||||
if networkEndpoint != nil {
|
||||
delete(n.endpoints, networkEndpoint.id)
|
||||
}
|
||||
n.Unlock()
|
||||
|
||||
if networkEndpoint != nil {
|
||||
logrus.Debugf("Removing stale endpoint from HNS")
|
||||
_, err := hcsshim.HNSEndpointRequest("DELETE", networkEndpoint.profileId, "")
|
||||
|
||||
if err != nil {
|
||||
logrus.Debugf("Failed to delete stale overlay endpoint (%s) from hns", networkEndpoint.id[0:7])
|
||||
}
|
||||
|
||||
if err := n.driver.deleteEndpointFromStore(networkEndpoint); err != nil {
|
||||
logrus.Debugf("Failed to delete stale overlay endpoint (%s) from store", networkEndpoint.id[0:7])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
|
||||
epOptions map[string]interface{}) error {
|
||||
var err error
|
||||
if err = validateID(nid, eid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Since we perform lazy configuration make sure we try
|
||||
// configuring the driver when we enter CreateEndpoint since
|
||||
// CreateNetwork may not be called in every node.
|
||||
if err := d.configure(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n := d.network(nid)
|
||||
if n == nil {
|
||||
return fmt.Errorf("network id %q not found", nid)
|
||||
}
|
||||
|
||||
ep := &endpoint{
|
||||
id: eid,
|
||||
nid: n.id,
|
||||
addr: ifInfo.Address(),
|
||||
mac: ifInfo.MacAddress(),
|
||||
}
|
||||
|
||||
if ep.addr == nil {
|
||||
return fmt.Errorf("create endpoint was not passed interface IP address")
|
||||
}
|
||||
|
||||
if s := n.getSubnetforIP(ep.addr); s == nil {
|
||||
return fmt.Errorf("no matching subnet for IP %q in network %q\n", ep.addr, nid)
|
||||
}
|
||||
|
||||
// TODO: Add port bindings and QoS policies here
|
||||
|
||||
hnsEndpoint := &hcsshim.HNSEndpoint{
|
||||
VirtualNetwork: n.hnsId,
|
||||
IPAddress: ep.addr.IP,
|
||||
EnableInternalDNS: true,
|
||||
}
|
||||
|
||||
if ep.mac != nil {
|
||||
hnsEndpoint.MacAddress = ep.mac.String()
|
||||
}
|
||||
|
||||
paPolicy, err := json.Marshal(hcsshim.PaPolicy{
|
||||
Type: "PA",
|
||||
PA: n.providerAddress,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hnsEndpoint.Policies = append(hnsEndpoint.Policies, paPolicy)
|
||||
|
||||
configurationb, err := json.Marshal(hnsEndpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hnsresponse, err := hcsshim.HNSEndpointRequest("POST", "", string(configurationb))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ep.profileId = hnsresponse.Id
|
||||
|
||||
if ep.mac == nil {
|
||||
ep.mac, err = net.ParseMAC(hnsresponse.MacAddress)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ifInfo.SetMacAddress(ep.mac); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
n.addEndpoint(ep)
|
||||
if err := d.writeEndpointToStore(ep); err != nil {
|
||||
return fmt.Errorf("failed to update overlay endpoint %s to local store: %v", ep.id[0:7], err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) DeleteEndpoint(nid, eid string) error {
|
||||
if err := validateID(nid, eid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n := d.network(nid)
|
||||
if n == nil {
|
||||
return fmt.Errorf("network id %q not found", nid)
|
||||
}
|
||||
|
||||
ep := n.endpoint(eid)
|
||||
if ep == nil {
|
||||
return fmt.Errorf("endpoint id %q not found", eid)
|
||||
}
|
||||
|
||||
n.deleteEndpoint(eid)
|
||||
|
||||
if err := d.deleteEndpointFromStore(ep); err != nil {
|
||||
logrus.Warnf("Failed to delete overlay endpoint %s from local store: %v", ep.id[0:7], err)
|
||||
}
|
||||
|
||||
_, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileId, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) {
|
||||
if err := validateID(nid, eid); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n := d.network(nid)
|
||||
if n == nil {
|
||||
return nil, fmt.Errorf("network id %q not found", nid)
|
||||
}
|
||||
|
||||
ep := n.endpoint(eid)
|
||||
if ep == nil {
|
||||
return nil, fmt.Errorf("endpoint id %q not found", eid)
|
||||
}
|
||||
|
||||
data := make(map[string]interface{}, 1)
|
||||
data["hnsid"] = ep.profileId
|
||||
data["AllowUnqualifiedDNSQuery"] = true
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (d *driver) deleteEndpointFromStore(e *endpoint) error {
|
||||
if d.localStore == nil {
|
||||
return fmt.Errorf("overlay local store not initialized, ep not deleted")
|
||||
}
|
||||
|
||||
if err := d.localStore.DeleteObjectAtomic(e); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) writeEndpointToStore(e *endpoint) error {
|
||||
if d.localStore == nil {
|
||||
return fmt.Errorf("overlay local store not initialized, ep not added")
|
||||
}
|
||||
|
||||
if err := d.localStore.PutObjectAtomic(e); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ep *endpoint) DataScope() string {
|
||||
return datastore.LocalScope
|
||||
}
|
||||
|
||||
func (ep *endpoint) New() datastore.KVObject {
|
||||
return &endpoint{}
|
||||
}
|
||||
|
||||
func (ep *endpoint) CopyTo(o datastore.KVObject) error {
|
||||
dstep := o.(*endpoint)
|
||||
*dstep = *ep
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ep *endpoint) Key() []string {
|
||||
return []string{overlayEndpointPrefix, ep.id}
|
||||
}
|
||||
|
||||
func (ep *endpoint) KeyPrefix() []string {
|
||||
return []string{overlayEndpointPrefix}
|
||||
}
|
||||
|
||||
func (ep *endpoint) Index() uint64 {
|
||||
return ep.dbIndex
|
||||
}
|
||||
|
||||
func (ep *endpoint) SetIndex(index uint64) {
|
||||
ep.dbIndex = index
|
||||
ep.dbExists = true
|
||||
}
|
||||
|
||||
func (ep *endpoint) Exists() bool {
|
||||
return ep.dbExists
|
||||
}
|
||||
|
||||
func (ep *endpoint) Skip() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (ep *endpoint) Value() []byte {
|
||||
b, err := json.Marshal(ep)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (ep *endpoint) SetValue(value []byte) error {
|
||||
return json.Unmarshal(value, ep)
|
||||
}
|
||||
|
||||
func (ep *endpoint) MarshalJSON() ([]byte, error) {
|
||||
epMap := make(map[string]interface{})
|
||||
|
||||
epMap["id"] = ep.id
|
||||
epMap["nid"] = ep.nid
|
||||
epMap["remote"] = ep.remote
|
||||
if ep.profileId != "" {
|
||||
epMap["profileId"] = ep.profileId
|
||||
}
|
||||
|
||||
if ep.addr != nil {
|
||||
epMap["addr"] = ep.addr.String()
|
||||
}
|
||||
if len(ep.mac) != 0 {
|
||||
epMap["mac"] = ep.mac.String()
|
||||
}
|
||||
|
||||
return json.Marshal(epMap)
|
||||
}
|
||||
|
||||
func (ep *endpoint) UnmarshalJSON(value []byte) error {
|
||||
var (
|
||||
err error
|
||||
epMap map[string]interface{}
|
||||
)
|
||||
|
||||
json.Unmarshal(value, &epMap)
|
||||
|
||||
ep.id = epMap["id"].(string)
|
||||
ep.nid = epMap["nid"].(string)
|
||||
ep.remote = epMap["remote"].(bool)
|
||||
if v, ok := epMap["profileId"]; ok {
|
||||
ep.profileId = v.(string)
|
||||
}
|
||||
if v, ok := epMap["mac"]; ok {
|
||||
if ep.mac, err = net.ParseMAC(v.(string)); err != nil {
|
||||
return types.InternalErrorf("failed to decode endpoint interface mac address after json unmarshal: %s", v.(string))
|
||||
}
|
||||
}
|
||||
if v, ok := epMap["addr"]; ok {
|
||||
if ep.addr, err = types.ParseCIDR(v.(string)); err != nil {
|
||||
return types.InternalErrorf("failed to decode endpoint interface ipv4 address after json unmarshal: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
209 vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_local_windows.go generated vendored Normal file

@@ -0,0 +1,209 @@
package overlay
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/datastore"
|
||||
)
|
||||
|
||||
const overlayNetworkPrefix = "overlay/network"
|
||||
|
||||
type localNetwork struct {
|
||||
id string
|
||||
hnsID string
|
||||
providerAddress string
|
||||
dbIndex uint64
|
||||
dbExists bool
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
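// findHnsNetwork returns the HNS network backing n: it reuses the HNS id and
// provider (management) address saved in the local store if present,
// otherwise it creates the network through HNS and persists the result.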
func (d *driver) findHnsNetwork(n *network) error {
|
||||
ln, err := d.getLocalNetworkFromStore(n.id)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ln == nil {
|
||||
subnets := []hcsshim.Subnet{}
|
||||
|
||||
for _, s := range n.subnets {
|
||||
subnet := hcsshim.Subnet{
|
||||
AddressPrefix: s.subnetIP.String(),
|
||||
}
|
||||
|
||||
if s.gwIP != nil {
|
||||
subnet.GatewayAddress = s.gwIP.IP.String()
|
||||
}
|
||||
|
||||
vsidPolicy, err := json.Marshal(hcsshim.VsidPolicy{
|
||||
Type: "VSID",
|
||||
VSID: uint(s.vni),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
subnet.Policies = append(subnet.Policies, vsidPolicy)
|
||||
subnets = append(subnets, subnet)
|
||||
}
|
||||
|
||||
network := &hcsshim.HNSNetwork{
|
||||
Name: n.name,
|
||||
Type: d.Type(),
|
||||
Subnets: subnets,
|
||||
NetworkAdapterName: n.interfaceName,
|
||||
}
|
||||
|
||||
configurationb, err := json.Marshal(network)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
configuration := string(configurationb)
|
||||
logrus.Infof("HNSNetwork Request =%v", configuration)
|
||||
|
||||
hnsresponse, err := hcsshim.HNSNetworkRequest("POST", "", configuration)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n.hnsId = hnsresponse.Id
|
||||
n.providerAddress = hnsresponse.ManagementIP
|
||||
|
||||
// Save local host specific info
|
||||
if err := d.writeLocalNetworkToStore(n); err != nil {
|
||||
return fmt.Errorf("failed to update data store for network %v: %v", n.id, err)
|
||||
}
|
||||
} else {
|
||||
n.hnsId = ln.hnsID
|
||||
n.providerAddress = ln.providerAddress
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) getLocalNetworkFromStore(nid string) (*localNetwork, error) {
|
||||
|
||||
if d.localStore == nil {
|
||||
return nil, fmt.Errorf("overlay local store not initialized, network not found")
|
||||
}
|
||||
|
||||
n := &localNetwork{id: nid}
|
||||
if err := d.localStore.GetObject(datastore.Key(n.Key()...), n); err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (d *driver) deleteLocalNetworkFromStore(n *network) error {
|
||||
if d.localStore == nil {
|
||||
return fmt.Errorf("overlay local store not initialized, network not deleted")
|
||||
}
|
||||
|
||||
ln, err := d.getLocalNetworkFromStore(n.id)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = d.localStore.DeleteObjectAtomic(ln); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) writeLocalNetworkToStore(n *network) error {
|
||||
if d.localStore == nil {
|
||||
return fmt.Errorf("overlay local store not initialized, network not added")
|
||||
}
|
||||
|
||||
ln := &localNetwork{
|
||||
id: n.id,
|
||||
hnsID: n.hnsId,
|
||||
providerAddress: n.providerAddress,
|
||||
}
|
||||
|
||||
if err := d.localStore.PutObjectAtomic(ln); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *localNetwork) DataScope() string {
|
||||
return datastore.LocalScope
|
||||
}
|
||||
|
||||
func (n *localNetwork) New() datastore.KVObject {
|
||||
return &localNetwork{}
|
||||
}
|
||||
|
||||
func (n *localNetwork) CopyTo(o datastore.KVObject) error {
|
||||
dstep := o.(*localNetwork)
|
||||
*dstep = *n
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *localNetwork) Key() []string {
|
||||
return []string{overlayNetworkPrefix, n.id}
|
||||
}
|
||||
|
||||
func (n *localNetwork) KeyPrefix() []string {
|
||||
return []string{overlayNetworkPrefix}
|
||||
}
|
||||
|
||||
func (n *localNetwork) Index() uint64 {
|
||||
return n.dbIndex
|
||||
}
|
||||
|
||||
func (n *localNetwork) SetIndex(index uint64) {
|
||||
n.dbIndex = index
|
||||
n.dbExists = true
|
||||
}
|
||||
|
||||
func (n *localNetwork) Exists() bool {
|
||||
return n.dbExists
|
||||
}
|
||||
|
||||
func (n *localNetwork) Skip() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (n *localNetwork) Value() []byte {
|
||||
b, err := json.Marshal(n)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (n *localNetwork) SetValue(value []byte) error {
|
||||
return json.Unmarshal(value, n)
|
||||
}
|
||||
|
||||
func (n *localNetwork) MarshalJSON() ([]byte, error) {
|
||||
networkMap := make(map[string]interface{})
|
||||
|
||||
networkMap["id"] = n.id
|
||||
networkMap["hnsID"] = n.hnsID
|
||||
networkMap["providerAddress"] = n.providerAddress
|
||||
return json.Marshal(networkMap)
|
||||
}
|
||||
|
||||
func (n *localNetwork) UnmarshalJSON(value []byte) error {
|
||||
var networkMap map[string]interface{}
|
||||
|
||||
json.Unmarshal(value, &networkMap)
|
||||
|
||||
n.id = networkMap["id"].(string)
|
||||
n.hnsID = networkMap["hnsID"].(string)
|
||||
n.providerAddress = networkMap["providerAddress"].(string)
|
||||
return nil
|
||||
}
|
512 vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go generated vendored Normal file

@@ -0,0 +1,512 @@
package overlay
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/datastore"
|
||||
"github.com/docker/libnetwork/driverapi"
|
||||
"github.com/docker/libnetwork/netlabel"
|
||||
"github.com/docker/libnetwork/types"
|
||||
)
|
||||
|
||||
var (
|
||||
hostMode bool
|
||||
networkMu sync.Mutex
|
||||
)
|
||||
|
||||
type networkTable map[string]*network
|
||||
|
||||
type subnet struct {
|
||||
vni uint32
|
||||
initErr error
|
||||
subnetIP *net.IPNet
|
||||
gwIP *net.IPNet
|
||||
}
|
||||
|
||||
type subnetJSON struct {
|
||||
SubnetIP string
|
||||
GwIP string
|
||||
Vni uint32
|
||||
}
|
||||
|
||||
type network struct {
|
||||
id string
|
||||
name string
|
||||
hnsId string
|
||||
dbIndex uint64
|
||||
dbExists bool
|
||||
providerAddress string
|
||||
interfaceName string
|
||||
endpoints endpointTable
|
||||
driver *driver
|
||||
initEpoch int
|
||||
initErr error
|
||||
subnets []*subnet
|
||||
secure bool
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) {
|
||||
return nil, types.NotImplementedErrorf("not implemented")
|
||||
}
|
||||
|
||||
func (d *driver) NetworkFree(id string) error {
|
||||
return types.NotImplementedErrorf("not implemented")
|
||||
}
|
||||
|
||||
func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error {
|
||||
var (
|
||||
networkName string
|
||||
interfaceName string
|
||||
)
|
||||
|
||||
if id == "" {
|
||||
return fmt.Errorf("invalid network id")
|
||||
}
|
||||
|
||||
if len(ipV4Data) == 0 || ipV4Data[0].Pool.String() == "0.0.0.0/0" {
|
||||
return types.BadRequestErrorf("ipv4 pool is empty")
|
||||
}
|
||||
|
||||
vnis := make([]uint32, 0, len(ipV4Data))
|
||||
|
||||
// Since we perform lazy configuration make sure we try
|
||||
// configuring the driver when we enter CreateNetwork
|
||||
if err := d.configure(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n := &network{
|
||||
id: id,
|
||||
driver: d,
|
||||
endpoints: endpointTable{},
|
||||
subnets: []*subnet{},
|
||||
}
|
||||
|
||||
genData, ok := option[netlabel.GenericData].(map[string]string)
|
||||
|
||||
if !ok {
|
||||
return fmt.Errorf("Unknown generic data option")
|
||||
}
|
||||
|
||||
for label, value := range genData {
|
||||
switch label {
|
||||
case "com.docker.network.windowsshim.networkname":
|
||||
networkName = value
|
||||
case "com.docker.network.windowsshim.interface":
|
||||
interfaceName = value
|
||||
case "com.docker.network.windowsshim.hnsid":
|
||||
n.hnsId = value
|
||||
case netlabel.OverlayVxlanIDList:
|
||||
vniStrings := strings.Split(value, ",")
|
||||
for _, vniStr := range vniStrings {
|
||||
vni, err := strconv.Atoi(vniStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid vxlan id value %q passed", vniStr)
|
||||
}
|
||||
|
||||
vnis = append(vnis, uint32(vni))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we are getting vnis from libnetwork, either we get for
|
||||
// all subnets or none.
|
||||
if len(vnis) != 0 && len(vnis) < len(ipV4Data) {
|
||||
return fmt.Errorf("insufficient vnis(%d) passed to overlay", len(vnis))
|
||||
}
|
||||
|
||||
for i, ipd := range ipV4Data {
|
||||
s := &subnet{
|
||||
subnetIP: ipd.Pool,
|
||||
gwIP: ipd.Gateway,
|
||||
}
|
||||
|
||||
if len(vnis) != 0 {
|
||||
s.vni = vnis[i]
|
||||
}
|
||||
|
||||
n.subnets = append(n.subnets, s)
|
||||
}
|
||||
|
||||
n.name = networkName
|
||||
if n.name == "" {
|
||||
n.name = id
|
||||
}
|
||||
|
||||
n.interfaceName = interfaceName
|
||||
|
||||
if err := n.writeToStore(); err != nil {
|
||||
return fmt.Errorf("failed to update data store for network %v: %v", n.id, err)
|
||||
}
|
||||
|
||||
if nInfo != nil {
|
||||
if err := nInfo.TableEventRegister(ovPeerTable); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
d.addNetwork(n)
|
||||
|
||||
err := d.findHnsNetwork(n)
|
||||
genData["com.docker.network.windowsshim.hnsid"] = n.hnsId
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *driver) DeleteNetwork(nid string) error {
|
||||
if nid == "" {
|
||||
return fmt.Errorf("invalid network id")
|
||||
}
|
||||
|
||||
// Make sure driver resources are initialized before proceeding
|
||||
if err := d.configure(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n := d.network(nid)
|
||||
if n == nil {
|
||||
return fmt.Errorf("could not find network with id %s", nid)
|
||||
}
|
||||
|
||||
_, err := hcsshim.HNSNetworkRequest("DELETE", n.hnsId, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.deleteNetwork(nid)
|
||||
d.deleteLocalNetworkFromStore(n)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) RevokeExternalConnectivity(nid, eid string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) addNetwork(n *network) {
|
||||
d.Lock()
|
||||
d.networks[n.id] = n
|
||||
d.Unlock()
|
||||
}
|
||||
|
||||
func (d *driver) deleteNetwork(nid string) {
|
||||
d.Lock()
|
||||
delete(d.networks, nid)
|
||||
d.Unlock()
|
||||
}
|
||||
|
||||
func (d *driver) network(nid string) *network {
|
||||
d.Lock()
|
||||
networks := d.networks
|
||||
d.Unlock()
|
||||
|
||||
n, ok := networks[nid]
|
||||
if !ok {
|
||||
n = d.getNetworkFromStore(nid)
|
||||
if n != nil {
|
||||
n.driver = d
|
||||
n.endpoints = endpointTable{}
|
||||
networks[nid] = n
|
||||
}
|
||||
}
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func (d *driver) getNetworkFromStore(nid string) *network {
|
||||
if d.store == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
n := &network{id: nid}
|
||||
if err := d.store.GetObject(datastore.Key(n.Key()...), n); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// As the network is being discovered from the global store, HNS may not be aware of it yet
|
||||
err := d.findHnsNetwork(n)
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to find hns network: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func (n *network) vxlanID(s *subnet) uint32 {
|
||||
n.Lock()
|
||||
defer n.Unlock()
|
||||
|
||||
return s.vni
|
||||
}
|
||||
|
||||
func (n *network) setVxlanID(s *subnet, vni uint32) {
|
||||
n.Lock()
|
||||
s.vni = vni
|
||||
n.Unlock()
|
||||
}
|
||||
|
||||
func (n *network) Key() []string {
|
||||
return []string{"overlay", "network", n.id}
|
||||
}
|
||||
|
||||
func (n *network) KeyPrefix() []string {
|
||||
return []string{"overlay", "network"}
|
||||
}
|
||||
|
||||
func (n *network) Value() []byte {
|
||||
m := map[string]interface{}{}
|
||||
|
||||
netJSON := []*subnetJSON{}
|
||||
|
||||
for _, s := range n.subnets {
|
||||
sj := &subnetJSON{
|
||||
SubnetIP: s.subnetIP.String(),
|
||||
GwIP: s.gwIP.String(),
|
||||
Vni: s.vni,
|
||||
}
|
||||
netJSON = append(netJSON, sj)
|
||||
}
|
||||
|
||||
b, err := json.Marshal(netJSON)
|
||||
if err != nil {
|
||||
return []byte{}
|
||||
}
|
||||
|
||||
m["secure"] = n.secure
|
||||
m["subnets"] = netJSON
|
||||
m["interfaceName"] = n.interfaceName
|
||||
m["providerAddress"] = n.providerAddress
|
||||
m["hnsId"] = n.hnsId
|
||||
m["name"] = n.name
|
||||
b, err = json.Marshal(m)
|
||||
if err != nil {
|
||||
return []byte{}
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func (n *network) Index() uint64 {
|
||||
return n.dbIndex
|
||||
}
|
||||
|
||||
func (n *network) SetIndex(index uint64) {
|
||||
n.dbIndex = index
|
||||
n.dbExists = true
|
||||
}
|
||||
|
||||
func (n *network) Exists() bool {
|
||||
return n.dbExists
|
||||
}
|
||||
|
||||
func (n *network) Skip() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (n *network) SetValue(value []byte) error {
|
||||
var (
|
||||
m map[string]interface{}
|
||||
newNet bool
|
||||
isMap = true
|
||||
netJSON = []*subnetJSON{}
|
||||
)
|
||||
|
||||
if err := json.Unmarshal(value, &m); err != nil {
|
||||
err := json.Unmarshal(value, &netJSON)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
isMap = false
|
||||
}
|
||||
|
||||
if len(n.subnets) == 0 {
|
||||
newNet = true
|
||||
}
|
||||
|
||||
if isMap {
|
||||
if val, ok := m["secure"]; ok {
|
||||
n.secure = val.(bool)
|
||||
}
|
||||
if val, ok := m["providerAddress"]; ok {
|
||||
n.providerAddress = val.(string)
|
||||
}
|
||||
if val, ok := m["interfaceName"]; ok {
|
||||
n.interfaceName = val.(string)
|
||||
}
|
||||
if val, ok := m["hnsId"]; ok {
|
||||
n.hnsId = val.(string)
|
||||
}
|
||||
if val, ok := m["name"]; ok {
|
||||
n.name = val.(string)
|
||||
}
|
||||
bytes, err := json.Marshal(m["subnets"])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := json.Unmarshal(bytes, &netJSON); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, sj := range netJSON {
|
||||
subnetIPstr := sj.SubnetIP
|
||||
gwIPstr := sj.GwIP
|
||||
vni := sj.Vni
|
||||
|
||||
subnetIP, _ := types.ParseCIDR(subnetIPstr)
|
||||
gwIP, _ := types.ParseCIDR(gwIPstr)
|
||||
|
||||
if newNet {
|
||||
s := &subnet{
|
||||
subnetIP: subnetIP,
|
||||
gwIP: gwIP,
|
||||
vni: vni,
|
||||
}
|
||||
n.subnets = append(n.subnets, s)
|
||||
} else {
|
||||
sNet := n.getMatchingSubnet(subnetIP)
|
||||
if sNet != nil {
|
||||
sNet.vni = vni
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *network) DataScope() string {
|
||||
return datastore.GlobalScope
|
||||
}
|
||||
|
||||
func (n *network) writeToStore() error {
|
||||
if n.driver.store == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return n.driver.store.PutObjectAtomic(n)
|
||||
}
|
||||
|
||||
func (n *network) releaseVxlanID() ([]uint32, error) {
|
||||
if len(n.subnets) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if n.driver.store != nil {
|
||||
if err := n.driver.store.DeleteObjectAtomic(n); err != nil {
|
||||
if err == datastore.ErrKeyModified || err == datastore.ErrKeyNotFound {
|
||||
// In both the above cases we can safely assume that the key has been removed by some other
|
||||
// instance and so simply get out of here
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to delete network to vxlan id map: %v", err)
|
||||
}
|
||||
}
|
||||
var vnis []uint32
|
||||
for _, s := range n.subnets {
|
||||
if n.driver.vxlanIdm != nil {
|
||||
vni := n.vxlanID(s)
|
||||
vnis = append(vnis, vni)
|
||||
n.driver.vxlanIdm.Release(uint64(vni))
|
||||
}
|
||||
|
||||
n.setVxlanID(s, 0)
|
||||
}
|
||||
|
||||
return vnis, nil
|
||||
}
|
||||
|
||||
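// obtainVxlanID allocates a vxlan id for subnet s from the driver's id
// manager and persists it in the global store, retrying when the store entry
// is modified concurrently.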
func (n *network) obtainVxlanID(s *subnet) error {
|
||||
// return if the subnet already has a vxlan id assigned
|
||||
if s.vni != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if n.driver.store == nil {
|
||||
return fmt.Errorf("no valid vxlan id and no datastore configured, cannot obtain vxlan id")
|
||||
}
|
||||
|
||||
for {
|
||||
if err := n.driver.store.GetObject(datastore.Key(n.Key()...), n); err != nil {
|
||||
return fmt.Errorf("getting network %q from datastore failed %v", n.id, err)
|
||||
}
|
||||
|
||||
if s.vni == 0 {
|
||||
vxlanID, err := n.driver.vxlanIdm.GetID()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to allocate vxlan id: %v", err)
|
||||
}
|
||||
|
||||
n.setVxlanID(s, uint32(vxlanID))
|
||||
if err := n.writeToStore(); err != nil {
|
||||
n.driver.vxlanIdm.Release(uint64(n.vxlanID(s)))
|
||||
n.setVxlanID(s, 0)
|
||||
if err == datastore.ErrKeyModified {
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("network %q failed to update data store: %v", n.id, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// contains returns true if the passed ip belongs to one of the network's
// subnets
|
||||
func (n *network) contains(ip net.IP) bool {
|
||||
for _, s := range n.subnets {
|
||||
if s.subnetIP.Contains(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// getSubnetforIP returns the subnet to which the given IP belongs
|
||||
func (n *network) getSubnetforIP(ip *net.IPNet) *subnet {
|
||||
for _, s := range n.subnets {
|
||||
// first check if the mask lengths are the same
|
||||
i, _ := s.subnetIP.Mask.Size()
|
||||
j, _ := ip.Mask.Size()
|
||||
if i != j {
|
||||
continue
|
||||
}
|
||||
if s.subnetIP.Contains(ip.IP) {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getMatchingSubnet returns the network's subnet that matches the input
|
||||
func (n *network) getMatchingSubnet(ip *net.IPNet) *subnet {
|
||||
if ip == nil {
|
||||
return nil
|
||||
}
|
||||
for _, s := range n.subnets {
|
||||
// first check if the mask lengths are the same
|
||||
i, _ := s.subnetIP.Mask.Size()
|
||||
j, _ := ip.Mask.Size()
|
||||
if i != j {
|
||||
continue
|
||||
}
|
||||
if s.subnetIP.IP.Equal(ip.IP) {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
179 vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_serf_windows.go generated vendored Normal file

@@ -0,0 +1,179 @@
package overlay
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
)
|
||||
|
||||
type ovNotify struct {
|
||||
action string
|
||||
ep *endpoint
|
||||
nw *network
|
||||
}
|
||||
|
||||
type logWriter struct{}
|
||||
|
||||
func (l *logWriter) Write(p []byte) (int, error) {
|
||||
str := string(p)
|
||||
|
||||
switch {
|
||||
case strings.Contains(str, "[WARN]"):
|
||||
logrus.Warn(str)
|
||||
case strings.Contains(str, "[DEBUG]"):
|
||||
logrus.Debug(str)
|
||||
case strings.Contains(str, "[INFO]"):
|
||||
logrus.Info(str)
|
||||
case strings.Contains(str, "[ERR]"):
|
||||
logrus.Error(str)
|
||||
}
|
||||
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (d *driver) serfInit() error {
|
||||
var err error
|
||||
|
||||
config := serf.DefaultConfig()
|
||||
config.Init()
|
||||
config.MemberlistConfig.BindAddr = d.bindAddress
|
||||
|
||||
d.eventCh = make(chan serf.Event, 4)
|
||||
config.EventCh = d.eventCh
|
||||
config.UserCoalescePeriod = 1 * time.Second
|
||||
config.UserQuiescentPeriod = 50 * time.Millisecond
|
||||
|
||||
config.LogOutput = &logWriter{}
|
||||
config.MemberlistConfig.LogOutput = config.LogOutput
|
||||
|
||||
s, err := serf.Create(config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create cluster node: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
s.Shutdown()
|
||||
}
|
||||
}()
|
||||
|
||||
d.serfInstance = s
|
||||
|
||||
d.notifyCh = make(chan ovNotify)
|
||||
d.exitCh = make(chan chan struct{})
|
||||
|
||||
go d.startSerfLoop(d.eventCh, d.notifyCh, d.exitCh)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) serfJoin(neighIP string) error {
|
||||
if neighIP == "" {
|
||||
return fmt.Errorf("no neighbor to join")
|
||||
}
|
||||
if _, err := d.serfInstance.Join([]string{neighIP}, false); err != nil {
|
||||
return fmt.Errorf("Failed to join the cluster at neigh IP %s: %v",
|
||||
neighIP, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) notifyEvent(event ovNotify) {
|
||||
ep := event.ep
|
||||
|
||||
ePayload := fmt.Sprintf("%s %s %s %s", event.action, ep.addr.IP.String(),
|
||||
net.IP(ep.addr.Mask).String(), ep.mac.String())
|
||||
eName := fmt.Sprintf("jl %s %s %s", d.serfInstance.LocalMember().Addr.String(),
|
||||
event.nw.id, ep.id)
|
||||
|
||||
if err := d.serfInstance.UserEvent(eName, []byte(ePayload), true); err != nil {
|
||||
logrus.Errorf("Sending user event failed: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
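// processEvent parses a serf user event describing an endpoint join or leave
// and applies it through peerAdd or peerDelete, ignoring events that
// originated from this host.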
func (d *driver) processEvent(u serf.UserEvent) {
|
||||
logrus.Debugf("Received user event name:%s, payload:%s\n", u.Name,
|
||||
string(u.Payload))
|
||||
|
||||
var dummy, action, vtepStr, nid, eid, ipStr, maskStr, macStr string
|
||||
if _, err := fmt.Sscan(u.Name, &dummy, &vtepStr, &nid, &eid); err != nil {
|
||||
fmt.Printf("Failed to scan name string: %v\n", err)
|
||||
}
|
||||
|
||||
if _, err := fmt.Sscan(string(u.Payload), &action,
|
||||
&ipStr, &maskStr, &macStr); err != nil {
|
||||
fmt.Printf("Failed to scan value string: %v\n", err)
|
||||
}
|
||||
|
||||
logrus.Debugf("Parsed data = %s/%s/%s/%s/%s/%s\n", nid, eid, vtepStr, ipStr, maskStr, macStr)
|
||||
|
||||
mac, err := net.ParseMAC(macStr)
|
||||
if err != nil {
|
||||
logrus.Errorf("Failed to parse mac: %v\n", err)
|
||||
}
|
||||
|
||||
if d.serfInstance.LocalMember().Addr.String() == vtepStr {
|
||||
return
|
||||
}
|
||||
|
||||
switch action {
|
||||
case "join":
|
||||
if err := d.peerAdd(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac,
|
||||
net.ParseIP(vtepStr), true); err != nil {
|
||||
logrus.Errorf("Peer add failed in the driver: %v\n", err)
|
||||
}
|
||||
case "leave":
|
||||
if err := d.peerDelete(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac,
|
||||
net.ParseIP(vtepStr), true); err != nil {
|
||||
logrus.Errorf("Peer delete failed in the driver: %v\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *driver) startSerfLoop(eventCh chan serf.Event, notifyCh chan ovNotify,
|
||||
exitCh chan chan struct{}) {
|
||||
|
||||
for {
|
||||
select {
|
||||
case notify, ok := <-notifyCh:
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
d.notifyEvent(notify)
|
||||
case ch, ok := <-exitCh:
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
if err := d.serfInstance.Leave(); err != nil {
|
||||
logrus.Errorf("failed leaving the cluster: %v\n", err)
|
||||
}
|
||||
|
||||
d.serfInstance.Shutdown()
|
||||
close(ch)
|
||||
return
|
||||
case e, ok := <-eventCh:
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
u, ok := e.(serf.UserEvent)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
d.processEvent(u)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *driver) isSerfAlive() bool {
|
||||
d.Lock()
|
||||
serfInstance := d.serfInstance
|
||||
d.Unlock()
|
||||
if serfInstance == nil || serfInstance.State() != serf.SerfAlive {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
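For readers tracing the event flow above, here is a minimal standalone sketch (an illustration, not part of the vendored diff) that round-trips the serf user-event format notifyEvent emits and processEvent parses; the VTEP address, network/endpoint ids, IP, mask and MAC are made-up placeholders.

package main

import (
	"fmt"
	"net"
)

func main() {
	// Compose the event the way notifyEvent does: the event name carries the
	// sender's VTEP address plus the network and endpoint ids, the payload
	// carries the action and the endpoint's IP, mask and MAC.
	name := fmt.Sprintf("jl %s %s %s", "192.168.1.10", "nid0123456", "eid0123456")
	payload := fmt.Sprintf("%s %s %s %s", "join", "10.0.0.2", "255.255.255.0", "02:42:0a:00:00:02")

	// Parse both strings back the way processEvent does.
	var dummy, vtepStr, nid, eid string
	fmt.Sscan(name, &dummy, &vtepStr, &nid, &eid)

	var action, ipStr, maskStr, macStr string
	fmt.Sscan(payload, &action, &ipStr, &maskStr, &macStr)

	mac, _ := net.ParseMAC(macStr)
	fmt.Println(action, nid, eid, vtepStr, net.ParseIP(ipStr), maskStr, mac)
}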
468
vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,468 @@
|
|||
// Code generated by protoc-gen-gogo.
|
||||
// source: overlay.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package overlay is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
overlay.proto
|
||||
|
||||
It has these top-level messages:
|
||||
PeerRecord
|
||||
*/
|
||||
package overlay
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import _ "github.com/gogo/protobuf/gogoproto"
|
||||
|
||||
import strings "strings"
|
||||
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
|
||||
import sort "sort"
|
||||
import strconv "strconv"
|
||||
import reflect "reflect"
|
||||
|
||||
import io "io"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
const _ = proto.GoGoProtoPackageIsVersion1
|
||||
|
||||
// PeerRecord defines the information corresponding to a peer
|
||||
// container in the overlay network.
|
||||
type PeerRecord struct {
|
||||
// Endpoint IP is the IP of the container attachment on the
|
||||
// given overlay network.
|
||||
EndpointIP string `protobuf:"bytes,1,opt,name=endpoint_ip,json=endpointIp,proto3" json:"endpoint_ip,omitempty"`
|
||||
// Endpoint MAC is the mac address of the container attachment
|
||||
// on the given overlay network.
|
||||
EndpointMAC string `protobuf:"bytes,2,opt,name=endpoint_mac,json=endpointMac,proto3" json:"endpoint_mac,omitempty"`
|
||||
// Tunnel Endpoint IP defines the host IP for the host in
|
||||
// which this container is running and can be reached by
|
||||
// building a tunnel to that host IP.
|
||||
TunnelEndpointIP string `protobuf:"bytes,3,opt,name=tunnel_endpoint_ip,json=tunnelEndpointIp,proto3" json:"tunnel_endpoint_ip,omitempty"`
|
||||
}
|
||||
|
||||
func (m *PeerRecord) Reset() { *m = PeerRecord{} }
|
||||
func (*PeerRecord) ProtoMessage() {}
|
||||
func (*PeerRecord) Descriptor() ([]byte, []int) { return fileDescriptorOverlay, []int{0} }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*PeerRecord)(nil), "overlay.PeerRecord")
|
||||
}
|
||||
func (this *PeerRecord) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 7)
|
||||
s = append(s, "&overlay.PeerRecord{")
|
||||
s = append(s, "EndpointIP: "+fmt.Sprintf("%#v", this.EndpointIP)+",\n")
|
||||
s = append(s, "EndpointMAC: "+fmt.Sprintf("%#v", this.EndpointMAC)+",\n")
|
||||
s = append(s, "TunnelEndpointIP: "+fmt.Sprintf("%#v", this.TunnelEndpointIP)+",\n")
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringOverlay(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func extensionToGoStringOverlay(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
|
||||
if e == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := "map[int32]proto.Extension{"
|
||||
keys := make([]int, 0, len(e))
|
||||
for k := range e {
|
||||
keys = append(keys, int(k))
|
||||
}
|
||||
sort.Ints(keys)
|
||||
ss := []string{}
|
||||
for _, k := range keys {
|
||||
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
|
||||
}
|
||||
s += strings.Join(ss, ",") + "}"
|
||||
return s
|
||||
}
|
||||
func (m *PeerRecord) Marshal() (data []byte, err error) {
|
||||
size := m.Size()
|
||||
data = make([]byte, size)
|
||||
n, err := m.MarshalTo(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data[:n], nil
|
||||
}
|
||||
|
||||
func (m *PeerRecord) MarshalTo(data []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.EndpointIP) > 0 {
|
||||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintOverlay(data, i, uint64(len(m.EndpointIP)))
|
||||
i += copy(data[i:], m.EndpointIP)
|
||||
}
|
||||
if len(m.EndpointMAC) > 0 {
|
||||
data[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintOverlay(data, i, uint64(len(m.EndpointMAC)))
|
||||
i += copy(data[i:], m.EndpointMAC)
|
||||
}
|
||||
if len(m.TunnelEndpointIP) > 0 {
|
||||
data[i] = 0x1a
|
||||
i++
|
||||
i = encodeVarintOverlay(data, i, uint64(len(m.TunnelEndpointIP)))
|
||||
i += copy(data[i:], m.TunnelEndpointIP)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Overlay(data []byte, offset int, v uint64) int {
|
||||
data[offset] = uint8(v)
|
||||
data[offset+1] = uint8(v >> 8)
|
||||
data[offset+2] = uint8(v >> 16)
|
||||
data[offset+3] = uint8(v >> 24)
|
||||
data[offset+4] = uint8(v >> 32)
|
||||
data[offset+5] = uint8(v >> 40)
|
||||
data[offset+6] = uint8(v >> 48)
|
||||
data[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Overlay(data []byte, offset int, v uint32) int {
|
||||
data[offset] = uint8(v)
|
||||
data[offset+1] = uint8(v >> 8)
|
||||
data[offset+2] = uint8(v >> 16)
|
||||
data[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintOverlay(data []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
data[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
data[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func (m *PeerRecord) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.EndpointIP)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovOverlay(uint64(l))
|
||||
}
|
||||
l = len(m.EndpointMAC)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovOverlay(uint64(l))
|
||||
}
|
||||
l = len(m.TunnelEndpointIP)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovOverlay(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovOverlay(x uint64) (n int) {
|
||||
for {
|
||||
n++
|
||||
x >>= 7
|
||||
if x == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
func sozOverlay(x uint64) (n int) {
|
||||
return sovOverlay(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (this *PeerRecord) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&PeerRecord{`,
|
||||
`EndpointIP:` + fmt.Sprintf("%v", this.EndpointIP) + `,`,
|
||||
`EndpointMAC:` + fmt.Sprintf("%v", this.EndpointMAC) + `,`,
|
||||
`TunnelEndpointIP:` + fmt.Sprintf("%v", this.TunnelEndpointIP) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func valueToStringOverlay(v interface{}) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("*%v", pv)
|
||||
}
|
||||
func (m *PeerRecord) Unmarshal(data []byte) error {
|
||||
l := len(data)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: PeerRecord: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: PeerRecord: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field EndpointIP", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthOverlay
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.EndpointIP = string(data[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field EndpointMAC", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthOverlay
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.EndpointMAC = string(data[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field TunnelEndpointIP", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthOverlay
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.TunnelEndpointIP = string(data[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipOverlay(data[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthOverlay
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipOverlay(data []byte) (n int, err error) {
|
||||
l := len(data)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if data[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
return iNdEx, nil
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthOverlay
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
var innerWire uint64
|
||||
var start int = iNdEx
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowOverlay
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
innerWireType := int(innerWire & 0x7)
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipOverlay(data[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
return iNdEx, nil
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
return iNdEx, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthOverlay = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowOverlay = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
var fileDescriptorOverlay = []byte{
|
||||
// 195 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xcd, 0x2f, 0x4b, 0x2d,
|
||||
0xca, 0x49, 0xac, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x87, 0x72, 0xa5, 0x44, 0xd2,
|
||||
0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x69, 0x2b, 0x23, 0x17, 0x57, 0x40,
|
||||
0x6a, 0x6a, 0x51, 0x50, 0x6a, 0x72, 0x7e, 0x51, 0x8a, 0x90, 0x3e, 0x17, 0x77, 0x6a, 0x5e, 0x4a,
|
||||
0x41, 0x7e, 0x66, 0x5e, 0x49, 0x7c, 0x66, 0x81, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x13, 0xdf,
|
||||
0xa3, 0x7b, 0xf2, 0x5c, 0xae, 0x50, 0x61, 0xcf, 0x80, 0x20, 0x2e, 0x98, 0x12, 0xcf, 0x02, 0x21,
|
||||
0x23, 0x2e, 0x1e, 0xb8, 0x86, 0xdc, 0xc4, 0x64, 0x09, 0x26, 0xb0, 0x0e, 0x7e, 0xa0, 0x0e, 0x6e,
|
||||
0x98, 0x0e, 0x5f, 0x47, 0xe7, 0x20, 0xb8, 0xa9, 0xbe, 0x89, 0xc9, 0x42, 0x4e, 0x5c, 0x42, 0x25,
|
||||
0xa5, 0x79, 0x79, 0xa9, 0x39, 0xf1, 0xc8, 0x76, 0x31, 0x83, 0x75, 0x8a, 0x00, 0x75, 0x0a, 0x84,
|
||||
0x80, 0x65, 0x91, 0x6c, 0x14, 0x28, 0x41, 0x15, 0x29, 0x70, 0x92, 0xb8, 0xf1, 0x50, 0x8e, 0xe1,
|
||||
0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x80, 0xf8, 0x02, 0x10, 0x3f, 0x00, 0xe2,
|
||||
0x24, 0x36, 0xb0, 0xc7, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xd7, 0x7d, 0x7d, 0x08,
|
||||
0x01, 0x00, 0x00,
|
||||
}
|
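A short sketch (not taken from the generated file) of the generated PeerRecord API in use; it assumes it is compiled inside the same overlay package so the type is in scope, and the field values are placeholders.

package overlay

import "fmt"

// peerRecordRoundTrip exercises the generated Marshal/Unmarshal pair.
func peerRecordRoundTrip() error {
	rec := &PeerRecord{
		EndpointIP:       "10.0.0.2/24",
		EndpointMAC:      "02:42:0a:00:00:02",
		TunnelEndpointIP: "192.168.1.10",
	}

	data, err := rec.Marshal()
	if err != nil {
		return err
	}

	var decoded PeerRecord
	if err := decoded.Unmarshal(data); err != nil {
		return err
	}

	// decoded now matches rec field for field.
	fmt.Println(decoded.EndpointIP, decoded.EndpointMAC, decoded.TunnelEndpointIP)
	return nil
}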
27
vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay.proto
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
syntax = "proto3";
|
||||
|
||||
import "gogoproto/gogo.proto";
|
||||
|
||||
package overlay;
|
||||
|
||||
option (gogoproto.marshaler_all) = true;
|
||||
option (gogoproto.unmarshaler_all) = true;
|
||||
option (gogoproto.stringer_all) = true;
|
||||
option (gogoproto.gostring_all) = true;
|
||||
option (gogoproto.sizer_all) = true;
|
||||
option (gogoproto.goproto_stringer_all) = false;
|
||||
|
||||
// PeerRecord defines the information corresponding to a peer
|
||||
// container in the overlay network.
|
||||
message PeerRecord {
|
||||
// Endpoint IP is the IP of the container attachment on the
|
||||
// given overlay network.
|
||||
string endpoint_ip = 1 [(gogoproto.customname) = "EndpointIP"];
|
||||
// Endpoint MAC is the mac address of the container attachment
|
||||
// on the given overlay network.
|
||||
string endpoint_mac = 2 [(gogoproto.customname) = "EndpointMAC"];
|
||||
// Tunnel Endpoint IP defines the host IP for the host in
|
||||
// which this container is running and can be reached by
|
||||
// building a tunnel to that host IP.
|
||||
string tunnel_endpoint_ip = 3 [(gogoproto.customname) = "TunnelEndpointIP"];
|
||||
}
|
297
vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,297 @@
|
|||
package overlay
|
||||
|
||||
//go:generate protoc -I.:../../Godeps/_workspace/src/github.com/gogo/protobuf --gogo_out=import_path=github.com/docker/libnetwork/drivers/overlay,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. overlay.proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/datastore"
|
||||
"github.com/docker/libnetwork/discoverapi"
|
||||
"github.com/docker/libnetwork/driverapi"
|
||||
"github.com/docker/libnetwork/idm"
|
||||
"github.com/docker/libnetwork/netlabel"
|
||||
"github.com/docker/libnetwork/types"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
)
|
||||
|
||||
const (
|
||||
networkType = "overlay"
|
||||
vethPrefix = "veth"
|
||||
vethLen = 7
|
||||
vxlanIDStart = 4096
|
||||
vxlanIDEnd = (1 << 24) - 1
|
||||
vxlanPort = 4789
|
||||
vxlanEncap = 50
|
||||
secureOption = "encrypted"
|
||||
)
|
||||
|
||||
var initVxlanIdm = make(chan (bool), 1)
|
||||
|
||||
type driver struct {
|
||||
eventCh chan serf.Event
|
||||
notifyCh chan ovNotify
|
||||
exitCh chan chan struct{}
|
||||
bindAddress string
|
||||
advertiseAddress string
|
||||
neighIP string
|
||||
config map[string]interface{}
|
||||
serfInstance *serf.Serf
|
||||
networks networkTable
|
||||
store datastore.DataStore
|
||||
localStore datastore.DataStore
|
||||
vxlanIdm *idm.Idm
|
||||
once sync.Once
|
||||
joinOnce sync.Once
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// Init registers a new instance of the overlay driver
|
||||
func Init(dc driverapi.DriverCallback, config map[string]interface{}) error {
|
||||
c := driverapi.Capability{
|
||||
DataScope: datastore.GlobalScope,
|
||||
}
|
||||
|
||||
d := &driver{
|
||||
networks: networkTable{},
|
||||
config: config,
|
||||
}
|
||||
|
||||
if data, ok := config[netlabel.GlobalKVClient]; ok {
|
||||
var err error
|
||||
dsc, ok := data.(discoverapi.DatastoreConfigData)
|
||||
if !ok {
|
||||
return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
|
||||
}
|
||||
d.store, err = datastore.NewDataStoreFromConfig(dsc)
|
||||
if err != nil {
|
||||
return types.InternalErrorf("failed to initialize data store: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if data, ok := config[netlabel.LocalKVClient]; ok {
|
||||
var err error
|
||||
dsc, ok := data.(discoverapi.DatastoreConfigData)
|
||||
if !ok {
|
||||
return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
|
||||
}
|
||||
d.localStore, err = datastore.NewDataStoreFromConfig(dsc)
|
||||
if err != nil {
|
||||
return types.InternalErrorf("failed to initialize local data store: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
d.restoreEndpoints()
|
||||
|
||||
return dc.RegisterDriver(networkType, d, c)
|
||||
}
|
||||
|
||||
// Endpoints are stored in the local store. Restore them and reconstruct the overlay sandbox
|
||||
func (d *driver) restoreEndpoints() error {
|
||||
if d.localStore == nil {
|
||||
logrus.Warnf("Cannot restore overlay endpoints because local datastore is missing")
|
||||
return nil
|
||||
}
|
||||
kvol, err := d.localStore.List(datastore.Key(overlayEndpointPrefix), &endpoint{})
|
||||
if err != nil && err != datastore.ErrKeyNotFound {
|
||||
return fmt.Errorf("failed to read overlay endpoint from store: %v", err)
|
||||
}
|
||||
|
||||
if err == datastore.ErrKeyNotFound {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, kvo := range kvol {
|
||||
ep := kvo.(*endpoint)
|
||||
|
||||
n := d.network(ep.nid)
|
||||
if n == nil || ep.remote {
|
||||
if !ep.remote {
|
||||
logrus.Debugf("Network (%s) not found for restored endpoint (%s)", ep.nid[0:7], ep.id[0:7])
|
||||
logrus.Debugf("Deleting stale overlay endpoint (%s) from store", ep.id[0:7])
|
||||
}
|
||||
|
||||
hcsshim.HNSEndpointRequest("DELETE", ep.profileId, "")
|
||||
|
||||
if err := d.deleteEndpointFromStore(ep); err != nil {
|
||||
logrus.Debugf("Failed to delete stale overlay endpoint (%s) from store", ep.id[0:7])
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
n.addEndpoint(ep)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fini cleans up the driver resources
|
||||
func Fini(drv driverapi.Driver) {
|
||||
d := drv.(*driver)
|
||||
|
||||
if d.exitCh != nil {
|
||||
waitCh := make(chan struct{})
|
||||
|
||||
d.exitCh <- waitCh
|
||||
|
||||
<-waitCh
|
||||
}
|
||||
}
|
||||
|
||||
func (d *driver) configure() error {
|
||||
if d.store == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if d.vxlanIdm == nil {
|
||||
return d.initializeVxlanIdm()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) initializeVxlanIdm() error {
|
||||
var err error
|
||||
|
||||
initVxlanIdm <- true
|
||||
defer func() { <-initVxlanIdm }()
|
||||
|
||||
if d.vxlanIdm != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
d.vxlanIdm, err = idm.New(d.store, "vxlan-id", vxlanIDStart, vxlanIDEnd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize vxlan id manager: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) Type() string {
|
||||
return networkType
|
||||
}
|
||||
|
||||
func validateSelf(node string) error {
|
||||
advIP := net.ParseIP(node)
|
||||
if advIP == nil {
|
||||
return fmt.Errorf("invalid self address (%s)", node)
|
||||
}
|
||||
|
||||
addrs, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to get interface addresses %v", err)
|
||||
}
|
||||
for _, addr := range addrs {
|
||||
ip, _, err := net.ParseCIDR(addr.String())
|
||||
if err == nil && ip.Equal(advIP) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("Multi-Host overlay networking requires cluster-advertise(%s) to be configured with a local ip-address that is reachable within the cluster", advIP.String())
|
||||
}
|
||||
|
||||
func (d *driver) nodeJoin(advertiseAddress, bindAddress string, self bool) {
|
||||
if self && !d.isSerfAlive() {
|
||||
if err := validateSelf(advertiseAddress); err != nil {
|
||||
logrus.Errorf("%s", err.Error())
|
||||
}
|
||||
|
||||
d.Lock()
|
||||
d.advertiseAddress = advertiseAddress
|
||||
d.bindAddress = bindAddress
|
||||
d.Unlock()
|
||||
|
||||
// If there is no cluster store there is no need to start serf.
|
||||
if d.store != nil {
|
||||
err := d.serfInit()
|
||||
if err != nil {
|
||||
logrus.Errorf("initializing serf instance failed: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
d.Lock()
|
||||
if !self {
|
||||
d.neighIP = advertiseAddress
|
||||
}
|
||||
neighIP := d.neighIP
|
||||
d.Unlock()
|
||||
|
||||
if d.serfInstance != nil && neighIP != "" {
|
||||
var err error
|
||||
d.joinOnce.Do(func() {
|
||||
err = d.serfJoin(neighIP)
|
||||
if err == nil {
|
||||
d.pushLocalDb()
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
logrus.Errorf("joining serf neighbor %s failed: %v", advertiseAddress, err)
|
||||
d.Lock()
|
||||
d.joinOnce = sync.Once{}
|
||||
d.Unlock()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *driver) pushLocalEndpointEvent(action, nid, eid string) {
|
||||
n := d.network(nid)
|
||||
if n == nil {
|
||||
logrus.Debugf("Error pushing local endpoint event for network %s", nid)
|
||||
return
|
||||
}
|
||||
ep := n.endpoint(eid)
|
||||
if ep == nil {
|
||||
logrus.Debugf("Error pushing local endpoint event for ep %s / %s", nid, eid)
|
||||
return
|
||||
}
|
||||
|
||||
if !d.isSerfAlive() {
|
||||
return
|
||||
}
|
||||
d.notifyCh <- ovNotify{
|
||||
action: action,
|
||||
nw: n,
|
||||
ep: ep,
|
||||
}
|
||||
}
|
||||
|
||||
// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster
|
||||
func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error {
|
||||
|
||||
var err error
|
||||
switch dType {
|
||||
case discoverapi.NodeDiscovery:
|
||||
nodeData, ok := data.(discoverapi.NodeDiscoveryData)
|
||||
if !ok || nodeData.Address == "" {
|
||||
return fmt.Errorf("invalid discovery data")
|
||||
}
|
||||
d.nodeJoin(nodeData.Address, nodeData.BindAddress, nodeData.Self)
|
||||
case discoverapi.DatastoreConfig:
|
||||
if d.store != nil {
|
||||
return types.ForbiddenErrorf("cannot accept datastore configuration: Overlay driver has a datastore configured already")
|
||||
}
|
||||
dsc, ok := data.(discoverapi.DatastoreConfigData)
|
||||
if !ok {
|
||||
return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
|
||||
}
|
||||
d.store, err = datastore.NewDataStoreFromConfig(dsc)
|
||||
if err != nil {
|
||||
return types.InternalErrorf("failed to initialize data store: %v", err)
|
||||
}
|
||||
default:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster
|
||||
func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error {
|
||||
return nil
|
||||
}
|
154
vendor/github.com/docker/libnetwork/drivers/windows/overlay/peerdb_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,154 @@
|
|||
package overlay
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"encoding/json"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/docker/libnetwork/types"
|
||||
)
|
||||
|
||||
const ovPeerTable = "overlay_peer_table"
|
||||
|
||||
func (d *driver) pushLocalDb() {
|
||||
if !d.isSerfAlive() {
|
||||
return
|
||||
}
|
||||
|
||||
d.Lock()
|
||||
networks := d.networks
|
||||
d.Unlock()
|
||||
|
||||
for _, n := range networks {
|
||||
n.Lock()
|
||||
endpoints := n.endpoints
|
||||
n.Unlock()
|
||||
|
||||
for _, ep := range endpoints {
|
||||
if !ep.remote {
|
||||
d.notifyCh <- ovNotify{
|
||||
action: "join",
|
||||
nw: n,
|
||||
ep: ep,
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
|
||||
peerMac net.HardwareAddr, vtep net.IP, updateDb bool) error {
|
||||
|
||||
logrus.Debugf("WINOVERLAY: Enter peerAdd for ca ip %s with ca mac %s", peerIP.String(), peerMac.String())
|
||||
|
||||
if err := validateID(nid, eid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n := d.network(nid)
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if updateDb {
|
||||
logrus.Info("WINOVERLAY: peerAdd: notifying HNS of the REMOTE endpoint")
|
||||
|
||||
hnsEndpoint := &hcsshim.HNSEndpoint{
|
||||
VirtualNetwork: n.hnsId,
|
||||
MacAddress: peerMac.String(),
|
||||
IPAddress: peerIP,
|
||||
IsRemoteEndpoint: true,
|
||||
}
|
||||
|
||||
paPolicy, err := json.Marshal(hcsshim.PaPolicy{
|
||||
Type: "PA",
|
||||
PA: vtep.String(),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hnsEndpoint.Policies = append(hnsEndpoint.Policies, paPolicy)
|
||||
|
||||
configurationb, err := json.Marshal(hnsEndpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Temp: We have to create an endpoint object to keep track of the HNS ID for
|
||||
// this endpoint so that we can retrieve it later when the endpoint is deleted.
|
||||
// This seems unnecessary when we already have docker's EID. See if we can pass
|
||||
// the global EID to HNS to use as its ID, rather than having each HNS assign
|
||||
// its own local ID for the endpoint
|
||||
|
||||
addr, err := types.ParseCIDR(peerIP.String() + "/32")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n.removeEndpointWithAddress(addr)
|
||||
|
||||
hnsresponse, err := hcsshim.HNSEndpointRequest("POST", "", string(configurationb))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ep := &endpoint{
|
||||
id: eid,
|
||||
nid: nid,
|
||||
addr: addr,
|
||||
mac: peerMac,
|
||||
profileId: hnsresponse.Id,
|
||||
remote: true,
|
||||
}
|
||||
|
||||
n.addEndpoint(ep)
|
||||
|
||||
if err := d.writeEndpointToStore(ep); err != nil {
|
||||
return fmt.Errorf("failed to update overlay endpoint %s to local store: %v", ep.id[0:7], err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
|
||||
peerMac net.HardwareAddr, vtep net.IP, updateDb bool) error {
|
||||
|
||||
logrus.Infof("WINOVERLAY: Enter peerDelete for endpoint %s and peer ip %s", eid, peerIP.String())
|
||||
|
||||
if err := validateID(nid, eid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n := d.network(nid)
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ep := n.endpoint(eid)
|
||||
if ep == nil {
|
||||
return fmt.Errorf("could not find endpoint with id %s", eid)
|
||||
}
|
||||
|
||||
if updateDb {
|
||||
_, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileId, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n.deleteEndpoint(eid)
|
||||
|
||||
if err := d.deleteEndpointFromStore(ep); err != nil {
|
||||
logrus.Debugf("Failed to delete stale overlay endpoint (%s) from store", ep.id[0:7])
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
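The following standalone sketch (an illustration, not code from this diff; it only builds on Windows because of hcsshim) shows the remote-endpoint request body that peerAdd above posts to HNS. The HNS network id, MAC, IP and VTEP address are made-up placeholders.

package main

import (
	"encoding/json"
	"fmt"
	"net"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// Provider-address (PA) policy pointing at the remote host's VTEP,
	// built the same way peerAdd does.
	paPolicy, err := json.Marshal(hcsshim.PaPolicy{Type: "PA", PA: "192.168.1.10"})
	if err != nil {
		panic(err)
	}

	hnsEndpoint := &hcsshim.HNSEndpoint{
		VirtualNetwork:   "hns-network-id", // n.hnsId in the driver
		MacAddress:       "02:42:0a:00:00:02",
		IPAddress:        net.ParseIP("10.0.0.2"),
		IsRemoteEndpoint: true,
		Policies:         []json.RawMessage{paPolicy},
	}

	body, err := json.Marshal(hnsEndpoint)
	if err != nil {
		panic(err)
	}

	// This is the configuration string handed to
	// hcsshim.HNSEndpointRequest("POST", "", string(body)).
	fmt.Println(string(body))
}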
130
vendor/github.com/docker/libnetwork/drivers/windows/windows.go
generated
vendored
|
@ -20,7 +20,7 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/datastore"
|
||||
"github.com/docker/libnetwork/discoverapi"
|
||||
"github.com/docker/libnetwork/driverapi"
|
||||
|
@ -44,21 +44,28 @@ type networkConfiguration struct {
|
|||
}
|
||||
|
||||
// endpointConfiguration represents the user specified configuration for the sandbox endpoint
|
||||
type endpointConfiguration struct {
|
||||
MacAddress net.HardwareAddr
|
||||
type endpointOption struct {
|
||||
MacAddress net.HardwareAddr
|
||||
QosPolicies []types.QosPolicy
|
||||
DNSServers []string
|
||||
DisableDNS bool
|
||||
DisableICC bool
|
||||
}
|
||||
|
||||
type endpointConnectivity struct {
|
||||
PortBindings []types.PortBinding
|
||||
ExposedPorts []types.TransportPort
|
||||
QosPolicies []types.QosPolicy
|
||||
DNSServers []string
|
||||
}
|
||||
|
||||
type hnsEndpoint struct {
|
||||
id string
|
||||
profileID string
|
||||
macAddress net.HardwareAddr
|
||||
config *endpointConfiguration // User specified parameters
|
||||
portMapping []types.PortBinding // Operation port bindings
|
||||
addr *net.IPNet
|
||||
id string
|
||||
profileID string
|
||||
macAddress net.HardwareAddr
|
||||
epOption *endpointOption // User specified parameters
|
||||
epConnectivity *endpointConnectivity // User specified parameters
|
||||
portMapping []types.PortBinding // Operation port bindings
|
||||
addr *net.IPNet
|
||||
gateway net.IP
|
||||
}
|
||||
|
||||
type hnsNetwork struct {
|
||||
|
@ -75,7 +82,8 @@ type driver struct {
|
|||
sync.Mutex
|
||||
}
|
||||
|
||||
func isValidNetworkType(networkType string) bool {
|
||||
// IsBuiltinLocalDriver validates if network-type is a builtin local-scoped driver
|
||||
func IsBuiltinLocalDriver(networkType string) bool {
|
||||
if "l2bridge" == networkType || "l2tunnel" == networkType || "nat" == networkType || "ics" == networkType || "transparent" == networkType {
|
||||
return true
|
||||
}
|
||||
|
@ -91,7 +99,7 @@ func newDriver(networkType string) *driver {
|
|||
// GetInit returns an initializer for the given network type
|
||||
func GetInit(networkType string) func(dc driverapi.DriverCallback, config map[string]interface{}) error {
|
||||
return func(dc driverapi.DriverCallback, config map[string]interface{}) error {
|
||||
if !isValidNetworkType(networkType) {
|
||||
if !IsBuiltinLocalDriver(networkType) {
|
||||
return types.BadRequestErrorf("Network type not supported: %s", networkType)
|
||||
}
|
||||
|
||||
|
@ -270,7 +278,7 @@ func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo d
|
|||
}
|
||||
|
||||
configuration := string(configurationb)
|
||||
log.Debugf("HNSNetwork Request =%v Address Space=%v", configuration, subnets)
|
||||
logrus.Debugf("HNSNetwork Request =%v Address Space=%v", configuration, subnets)
|
||||
|
||||
hnsresponse, err := hcsshim.HNSNetworkRequest("POST", "", configuration)
|
||||
if err != nil {
|
||||
|
@ -390,12 +398,12 @@ func parsePortBindingPolicies(policies []json.RawMessage) ([]types.PortBinding,
|
|||
return bindings, nil
|
||||
}
|
||||
|
||||
func parseEndpointOptions(epOptions map[string]interface{}) (*endpointConfiguration, error) {
|
||||
func parseEndpointOptions(epOptions map[string]interface{}) (*endpointOption, error) {
|
||||
if epOptions == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
ec := &endpointConfiguration{}
|
||||
ec := &endpointOption{}
|
||||
|
||||
if opt, ok := epOptions[netlabel.MacAddress]; ok {
|
||||
if mac, ok := opt.(net.HardwareAddr); ok {
|
||||
|
@ -405,22 +413,6 @@ func parseEndpointOptions(epOptions map[string]interface{}) (*endpointConfigurat
|
|||
}
|
||||
}
|
||||
|
||||
if opt, ok := epOptions[netlabel.PortMap]; ok {
|
||||
if bs, ok := opt.([]types.PortBinding); ok {
|
||||
ec.PortBindings = bs
|
||||
} else {
|
||||
return nil, fmt.Errorf("Invalid endpoint configuration")
|
||||
}
|
||||
}
|
||||
|
||||
if opt, ok := epOptions[netlabel.ExposedPorts]; ok {
|
||||
if ports, ok := opt.([]types.TransportPort); ok {
|
||||
ec.ExposedPorts = ports
|
||||
} else {
|
||||
return nil, fmt.Errorf("Invalid endpoint configuration")
|
||||
}
|
||||
}
|
||||
|
||||
if opt, ok := epOptions[QosPolicies]; ok {
|
||||
if policies, ok := opt.([]types.QosPolicy); ok {
|
||||
ec.QosPolicies = policies
|
||||
|
@ -437,6 +429,47 @@ func parseEndpointOptions(epOptions map[string]interface{}) (*endpointConfigurat
|
|||
}
|
||||
}
|
||||
|
||||
if opt, ok := epOptions[DisableICC]; ok {
|
||||
if disableICC, ok := opt.(bool); ok {
|
||||
ec.DisableICC = disableICC
|
||||
} else {
|
||||
return nil, fmt.Errorf("Invalid endpoint configuration")
|
||||
}
|
||||
}
|
||||
|
||||
if opt, ok := epOptions[DisableDNS]; ok {
|
||||
if disableDNS, ok := opt.(bool); ok {
|
||||
ec.DisableDNS = disableDNS
|
||||
} else {
|
||||
return nil, fmt.Errorf("Invalid endpoint configuration")
|
||||
}
|
||||
}
|
||||
|
||||
return ec, nil
|
||||
}
|
||||
|
||||
func parseEndpointConnectivity(epOptions map[string]interface{}) (*endpointConnectivity, error) {
|
||||
if epOptions == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
ec := &endpointConnectivity{}
|
||||
|
||||
if opt, ok := epOptions[netlabel.PortMap]; ok {
|
||||
if bs, ok := opt.([]types.PortBinding); ok {
|
||||
ec.PortBindings = bs
|
||||
} else {
|
||||
return nil, fmt.Errorf("Invalid endpoint configuration")
|
||||
}
|
||||
}
|
||||
|
||||
if opt, ok := epOptions[netlabel.ExposedPorts]; ok {
|
||||
if ports, ok := opt.([]types.TransportPort); ok {
|
||||
ec.ExposedPorts = ports
|
||||
} else {
|
||||
return nil, fmt.Errorf("Invalid endpoint configuration")
|
||||
}
|
||||
}
|
||||
return ec, nil
|
||||
}
|
||||
|
||||
|
@ -456,7 +489,8 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
|
|||
VirtualNetwork: n.config.HnsID,
|
||||
}
|
||||
|
||||
ec, err := parseEndpointOptions(epOptions)
|
||||
epOption, err := parseEndpointOptions(epOptions)
|
||||
epConnectivity, err := parseEndpointConnectivity(epOptions)
|
||||
|
||||
macAddress := ifInfo.MacAddress()
|
||||
// Use the macaddress if it was provided
|
||||
|
@ -464,12 +498,12 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
|
|||
endpointStruct.MacAddress = strings.Replace(macAddress.String(), ":", "-", -1)
|
||||
}
|
||||
|
||||
endpointStruct.Policies, err = convertPortBindings(ec.PortBindings)
|
||||
endpointStruct.Policies, err = convertPortBindings(epConnectivity.PortBindings)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
qosPolicies, err := convertQosPolicies(ec.QosPolicies)
|
||||
qosPolicies, err := convertQosPolicies(epOption.QosPolicies)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -479,12 +513,14 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
|
|||
endpointStruct.IPAddress = ifInfo.Address().IP
|
||||
}
|
||||
|
||||
endpointStruct.DNSServerList = strings.Join(ec.DNSServers, ",")
|
||||
endpointStruct.DNSServerList = strings.Join(epOption.DNSServers, ",")
|
||||
|
||||
if n.driver.name == "nat" {
|
||||
if n.driver.name == "nat" && !epOption.DisableDNS {
|
||||
endpointStruct.EnableInternalDNS = true
|
||||
}
|
||||
|
||||
endpointStruct.DisableICC = epOption.DisableICC
|
||||
|
||||
configurationb, err := json.Marshal(endpointStruct)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -507,8 +543,13 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
|
|||
macAddress: mac,
|
||||
}
|
||||
|
||||
if hnsresponse.GatewayAddress != "" {
|
||||
endpoint.gateway = net.ParseIP(hnsresponse.GatewayAddress)
|
||||
}
|
||||
|
||||
endpoint.profileID = hnsresponse.Id
|
||||
endpoint.config = ec
|
||||
endpoint.epConnectivity = epConnectivity
|
||||
endpoint.epOption = epOption
|
||||
endpoint.portMapping, err = parsePortBindingPolicies(hnsresponse.Policies)
|
||||
|
||||
if err != nil {
|
||||
|
@ -571,10 +612,10 @@ func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, erro
|
|||
}
|
||||
|
||||
data["hnsid"] = ep.profileID
|
||||
if ep.config.ExposedPorts != nil {
|
||||
if ep.epConnectivity.ExposedPorts != nil {
|
||||
// Return a copy of the config data
|
||||
epc := make([]types.TransportPort, 0, len(ep.config.ExposedPorts))
|
||||
for _, tp := range ep.config.ExposedPorts {
|
||||
epc := make([]types.TransportPort, 0, len(ep.epConnectivity.ExposedPorts))
|
||||
for _, tp := range ep.epConnectivity.ExposedPorts {
|
||||
epc = append(epc, tp.GetCopy())
|
||||
}
|
||||
data[netlabel.ExposedPorts] = epc
|
||||
|
@ -603,7 +644,12 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
|
|||
}
|
||||
|
||||
// Ensure that the endpoint exists
|
||||
_, err = network.getEndpoint(eid)
|
||||
endpoint, err := network.getEndpoint(eid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = jinfo.SetGateway(endpoint.gateway)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
2
vendor/github.com/docker/libnetwork/drivers_solaris.go
generated
vendored
|
@ -3,10 +3,12 @@ package libnetwork
|
|||
import (
|
||||
"github.com/docker/libnetwork/drivers/null"
|
||||
"github.com/docker/libnetwork/drivers/solaris/bridge"
|
||||
"github.com/docker/libnetwork/drivers/solaris/overlay"
|
||||
)
|
||||
|
||||
func getInitializers() []initializer {
|
||||
return []initializer{
|
||||
{overlay.Init, "overlay"},
|
||||
{bridge.Init, "bridge"},
|
||||
{null.Init, "null"},
|
||||
}
|
||||
|
|
4
vendor/github.com/docker/libnetwork/drivers_windows.go
generated
vendored
|
@ -2,12 +2,16 @@ package libnetwork
|
|||
|
||||
import (
|
||||
"github.com/docker/libnetwork/drivers/null"
|
||||
"github.com/docker/libnetwork/drivers/remote"
|
||||
"github.com/docker/libnetwork/drivers/windows"
|
||||
"github.com/docker/libnetwork/drivers/windows/overlay"
|
||||
)
|
||||
|
||||
func getInitializers() []initializer {
|
||||
return []initializer{
|
||||
{null.Init, "null"},
|
||||
{overlay.Init, "overlay"},
|
||||
{remote.Init, "remote"},
|
||||
{windows.GetInit("transparent"), "transparent"},
|
||||
{windows.GetInit("l2bridge"), "l2bridge"},
|
||||
{windows.GetInit("l2tunnel"), "l2tunnel"},
|
||||
|
|
76
vendor/github.com/docker/libnetwork/endpoint.go
generated
vendored
|
@ -8,7 +8,7 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/datastore"
|
||||
"github.com/docker/libnetwork/ipamapi"
|
||||
"github.com/docker/libnetwork/netlabel"
|
||||
|
@ -142,12 +142,12 @@ func (ep *endpoint) UnmarshalJSON(b []byte) (err error) {
|
|||
|
||||
bytes, err := json.Marshal(tmp)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
logrus.Error(err)
|
||||
break
|
||||
}
|
||||
err = json.Unmarshal(bytes, &pb)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
logrus.Error(err)
|
||||
break
|
||||
}
|
||||
pblist = append(pblist, pb)
|
||||
|
@ -164,12 +164,12 @@ func (ep *endpoint) UnmarshalJSON(b []byte) (err error) {
|
|||
|
||||
bytes, err := json.Marshal(tmp)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
logrus.Error(err)
|
||||
break
|
||||
}
|
||||
err = json.Unmarshal(bytes, &tp)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
logrus.Error(err)
|
||||
break
|
||||
}
|
||||
tplist = append(tplist, tp)
|
||||
|
@ -472,7 +472,7 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
|
|||
defer func() {
|
||||
if err != nil {
|
||||
if err := d.Leave(nid, epid); err != nil {
|
||||
log.Warnf("driver leave failed while rolling back join: %v", err)
|
||||
logrus.Warnf("driver leave failed while rolling back join: %v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -495,10 +495,6 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
|
|||
return err
|
||||
}
|
||||
|
||||
if err = n.getController().updateToStore(ep); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Current endpoint providing external connectivity for the sandbox
|
||||
extEp := sb.getGatewayEndpoint()
|
||||
|
||||
|
@ -515,6 +511,10 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
|
|||
return err
|
||||
}
|
||||
|
||||
if err = n.getController().updateToStore(ep); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sb.needDefaultGW() && sb.getEndpointInGWNetwork() == nil {
|
||||
return sb.setupDefaultGW()
|
||||
}
|
||||
|
@ -523,7 +523,7 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
|
|||
|
||||
if moveExtConn {
|
||||
if extEp != nil {
|
||||
log.Debugf("Revoking external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID())
|
||||
logrus.Debugf("Revoking external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID())
|
||||
extN, err := extEp.getNetworkFromStore()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get network from store during join: %v", err)
|
||||
|
@ -540,14 +540,14 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
|
|||
defer func() {
|
||||
if err != nil {
|
||||
if e := extD.ProgramExternalConnectivity(extEp.network.ID(), extEp.ID(), sb.Labels()); e != nil {
|
||||
log.Warnf("Failed to roll-back external connectivity on endpoint %s (%s): %v",
|
||||
logrus.Warnf("Failed to roll-back external connectivity on endpoint %s (%s): %v",
|
||||
extEp.Name(), extEp.ID(), e)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
if !n.internal {
|
||||
log.Debugf("Programming external connectivity on endpoint %s (%s)", ep.Name(), ep.ID())
|
||||
logrus.Debugf("Programming external connectivity on endpoint %s (%s)", ep.Name(), ep.ID())
|
||||
if err = d.ProgramExternalConnectivity(n.ID(), ep.ID(), sb.Labels()); err != nil {
|
||||
return types.InternalErrorf(
|
||||
"driver failed programming external connectivity on endpoint %s (%s): %v",
|
||||
|
@ -559,7 +559,7 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
|
|||
|
||||
if !sb.needDefaultGW() {
|
||||
if err := sb.clearDefaultGW(); err != nil {
|
||||
log.Warnf("Failure while disconnecting sandbox %s (%s) from gateway network: %v",
|
||||
logrus.Warnf("Failure while disconnecting sandbox %s (%s) from gateway network: %v",
|
||||
sb.ID(), sb.ContainerID(), err)
|
||||
}
|
||||
}
|
||||
|
@ -682,22 +682,22 @@ func (ep *endpoint) sbLeave(sb *sandbox, force bool, options ...EndpointOption)
|
|||
|
||||
if d != nil {
|
||||
if moveExtConn {
|
||||
log.Debugf("Revoking external connectivity on endpoint %s (%s)", ep.Name(), ep.ID())
|
||||
logrus.Debugf("Revoking external connectivity on endpoint %s (%s)", ep.Name(), ep.ID())
|
||||
if err := d.RevokeExternalConnectivity(n.id, ep.id); err != nil {
|
||||
log.Warnf("driver failed revoking external connectivity on endpoint %s (%s): %v",
|
||||
logrus.Warnf("driver failed revoking external connectivity on endpoint %s (%s): %v",
|
||||
ep.Name(), ep.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := d.Leave(n.id, ep.id); err != nil {
|
||||
if _, ok := err.(types.MaskableError); !ok {
|
||||
log.Warnf("driver error disconnecting container %s : %v", ep.name, err)
|
||||
logrus.Warnf("driver error disconnecting container %s : %v", ep.name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := sb.clearNetworkResources(ep); err != nil {
|
||||
log.Warnf("Could not cleanup network resources on container %s disconnect: %v", ep.name, err)
|
||||
logrus.Warnf("Could not cleanup network resources on container %s disconnect: %v", ep.name, err)
|
||||
}
|
||||
|
||||
// Update the store about the sandbox detach only after we
|
||||
|
@ -710,7 +710,7 @@ func (ep *endpoint) sbLeave(sb *sandbox, force bool, options ...EndpointOption)
|
|||
}
|
||||
|
||||
if e := ep.deleteFromCluster(); e != nil {
|
||||
log.Errorf("Could not delete state for endpoint %s from cluster: %v", ep.Name(), e)
|
||||
logrus.Errorf("Could not delete state for endpoint %s from cluster: %v", ep.Name(), e)
|
||||
}
|
||||
|
||||
sb.deleteHostsEntries(n.getSvcRecords(ep))
|
||||
|
@ -721,7 +721,7 @@ func (ep *endpoint) sbLeave(sb *sandbox, force bool, options ...EndpointOption)
|
|||
// New endpoint providing external connectivity for the sandbox
|
||||
extEp = sb.getGatewayEndpoint()
|
||||
if moveExtConn && extEp != nil {
|
||||
log.Debugf("Programming external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID())
|
||||
logrus.Debugf("Programming external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID())
|
||||
extN, err := extEp.getNetworkFromStore()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get network from store during leave: %v", err)
|
||||
|
@ -731,14 +731,14 @@ func (ep *endpoint) sbLeave(sb *sandbox, force bool, options ...EndpointOption)
|
|||
return fmt.Errorf("failed to leave endpoint: %v", err)
|
||||
}
|
||||
if err := extD.ProgramExternalConnectivity(extEp.network.ID(), extEp.ID(), sb.Labels()); err != nil {
|
||||
log.Warnf("driver failed programming external connectivity on endpoint %s: (%s) %v",
|
||||
logrus.Warnf("driver failed programming external connectivity on endpoint %s: (%s) %v",
|
||||
extEp.Name(), extEp.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
if !sb.needDefaultGW() {
|
||||
if err := sb.clearDefaultGW(); err != nil {
|
||||
log.Warnf("Failure while disconnecting sandbox %s (%s) from gateway network: %v",
|
||||
logrus.Warnf("Failure while disconnecting sandbox %s (%s) from gateway network: %v",
|
||||
sb.ID(), sb.ContainerID(), err)
|
||||
}
|
||||
}
|
||||
|
@ -771,7 +771,7 @@ func (ep *endpoint) Delete(force bool) error {
|
|||
|
||||
if sb != nil {
|
||||
if e := ep.sbLeave(sb.(*sandbox), force); e != nil {
|
||||
log.Warnf("failed to leave sandbox for endpoint %s : %v", name, e)
|
||||
logrus.Warnf("failed to leave sandbox for endpoint %s : %v", name, e)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -783,7 +783,7 @@ func (ep *endpoint) Delete(force bool) error {
|
|||
if err != nil && !force {
|
||||
ep.dbExists = false
|
||||
if e := n.getController().updateToStore(ep); e != nil {
|
||||
log.Warnf("failed to recreate endpoint in store %s : %v", name, e)
|
||||
logrus.Warnf("failed to recreate endpoint in store %s : %v", name, e)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -798,7 +798,7 @@ func (ep *endpoint) Delete(force bool) error {
|
|||
ep.releaseAddress()
|
||||
|
||||
if err := n.getEpCnt().DecEndpointCnt(); err != nil {
|
||||
log.Warnf("failed to decrement endpoint count for ep %s: %v", ep.ID(), err)
|
||||
logrus.Warnf("failed to decrement endpoint count for ep %s: %v", ep.ID(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -826,7 +826,7 @@ func (ep *endpoint) deleteEndpoint(force bool) error {
|
|||
}
|
||||
|
||||
if _, ok := err.(types.MaskableError); !ok {
|
||||
log.Warnf("driver error deleting endpoint %s : %v", name, err)
|
||||
logrus.Warnf("driver error deleting endpoint %s : %v", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -976,7 +976,7 @@ func JoinOptionPriority(ep Endpoint, prio int) EndpointOption {
|
|||
sb, ok := c.sandboxes[ep.sandboxID]
|
||||
c.Unlock()
|
||||
if !ok {
|
||||
log.Errorf("Could not set endpoint priority value during Join to endpoint %s: No sandbox id present in endpoint", ep.id)
|
||||
logrus.Errorf("Could not set endpoint priority value during Join to endpoint %s: No sandbox id present in endpoint", ep.id)
|
||||
return
|
||||
}
|
||||
sb.epPriority[ep.id] = prio
|
||||
|
@ -995,7 +995,7 @@ func (ep *endpoint) assignAddress(ipam ipamapi.Ipam, assignIPv4, assignIPv6 bool
|
|||
return nil
|
||||
}
|
||||
|
||||
log.Debugf("Assigning addresses for endpoint %s's interface on network %s", ep.Name(), n.Name())
|
||||
logrus.Debugf("Assigning addresses for endpoint %s's interface on network %s", ep.Name(), n.Name())
|
||||
|
||||
if assignIPv4 {
|
||||
if err = ep.assignAddressVersion(4, ipam); err != nil {
|
||||
|
@ -1075,23 +1075,23 @@ func (ep *endpoint) releaseAddress() {
|
|||
return
|
||||
}
|
||||
|
||||
log.Debugf("Releasing addresses for endpoint %s's interface on network %s", ep.Name(), n.Name())
|
||||
logrus.Debugf("Releasing addresses for endpoint %s's interface on network %s", ep.Name(), n.Name())
|
||||
|
||||
ipam, _, err := n.getController().getIPAMDriver(n.ipamType)
|
||||
if err != nil {
|
||||
log.Warnf("Failed to retrieve ipam driver to release interface address on delete of endpoint %s (%s): %v", ep.Name(), ep.ID(), err)
|
||||
logrus.Warnf("Failed to retrieve ipam driver to release interface address on delete of endpoint %s (%s): %v", ep.Name(), ep.ID(), err)
|
||||
return
|
||||
}
|
||||
|
||||
if ep.iface.addr != nil {
|
||||
if err := ipam.ReleaseAddress(ep.iface.v4PoolID, ep.iface.addr.IP); err != nil {
|
||||
log.Warnf("Failed to release ip address %s on delete of endpoint %s (%s): %v", ep.iface.addr.IP, ep.Name(), ep.ID(), err)
|
||||
logrus.Warnf("Failed to release ip address %s on delete of endpoint %s (%s): %v", ep.iface.addr.IP, ep.Name(), ep.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
if ep.iface.addrv6 != nil && ep.iface.addrv6.IP.IsGlobalUnicast() {
|
||||
if err := ipam.ReleaseAddress(ep.iface.v6PoolID, ep.iface.addrv6.IP); err != nil {
|
||||
log.Warnf("Failed to release ip address %s on delete of endpoint %s (%s): %v", ep.iface.addrv6.IP, ep.Name(), ep.ID(), err)
|
||||
logrus.Warnf("Failed to release ip address %s on delete of endpoint %s (%s): %v", ep.iface.addrv6.IP, ep.Name(), ep.ID(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1106,14 +1106,14 @@ func (c *controller) cleanupLocalEndpoints() {
|
|||
}
|
||||
nl, err := c.getNetworksForScope(datastore.LocalScope)
|
||||
if err != nil {
|
||||
log.Warnf("Could not get list of networks during endpoint cleanup: %v", err)
|
||||
logrus.Warnf("Could not get list of networks during endpoint cleanup: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, n := range nl {
|
||||
epl, err := n.getEndpointsFromStore()
|
||||
if err != nil {
|
||||
log.Warnf("Could not get list of endpoints in network %s during endpoint cleanup: %v", n.name, err)
|
||||
logrus.Warnf("Could not get list of endpoints in network %s during endpoint cleanup: %v", n.name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -1121,21 +1121,21 @@ func (c *controller) cleanupLocalEndpoints() {
|
|||
if _, ok := eps[ep.id]; ok {
|
||||
continue
|
||||
}
|
||||
log.Infof("Removing stale endpoint %s (%s)", ep.name, ep.id)
|
||||
logrus.Infof("Removing stale endpoint %s (%s)", ep.name, ep.id)
|
||||
if err := ep.Delete(true); err != nil {
|
||||
log.Warnf("Could not delete local endpoint %s during endpoint cleanup: %v", ep.name, err)
|
||||
logrus.Warnf("Could not delete local endpoint %s during endpoint cleanup: %v", ep.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
epl, err = n.getEndpointsFromStore()
|
||||
if err != nil {
|
||||
log.Warnf("Could not get list of endpoints in network %s for count update: %v", n.name, err)
|
||||
logrus.Warnf("Could not get list of endpoints in network %s for count update: %v", n.name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
epCnt := n.getEpCnt().EndpointCnt()
|
||||
if epCnt != uint64(len(epl)) {
|
||||
log.Infof("Fixing inconsistent endpoint_cnt for network %s. Expected=%d, Actual=%d", n.name, len(epl), epCnt)
|
||||
logrus.Infof("Fixing inconsistent endpoint_cnt for network %s. Expected=%d, Actual=%d", n.name, len(epl), epCnt)
|
||||
n.getEpCnt().setCnt(uint64(len(epl)))
|
||||
}
|
||||
}
|
||||
|
|
25
vendor/github.com/docker/libnetwork/endpoint_info.go
generated
vendored
|
@ -205,31 +205,6 @@ func (ep *endpoint) Info() EndpointInfo {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (ep *endpoint) DriverInfo() (map[string]interface{}, error) {
|
||||
ep, err := ep.retrieveFromStore()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if sb, ok := ep.getSandbox(); ok {
|
||||
if gwep := sb.getEndpointInGWNetwork(); gwep != nil && gwep.ID() != ep.ID() {
|
||||
return gwep.DriverInfo()
|
||||
}
|
||||
}
|
||||
|
||||
n, err := ep.getNetworkFromStore()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not find network in store for driver info: %v", err)
|
||||
}
|
||||
|
||||
driver, err := n.driver(true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get driver info: %v", err)
|
||||
}
|
||||
|
||||
return driver.EndpointOperInfo(n.ID(), ep.ID())
|
||||
}
|
||||
|
||||
func (ep *endpoint) Iface() InterfaceInfo {
|
||||
ep.Lock()
|
||||
defer ep.Unlock()
|
||||
|
|
30
vendor/github.com/docker/libnetwork/endpoint_info_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
|
|||
// +build !windows
|
||||
|
||||
package libnetwork
|
||||
|
||||
import "fmt"
|
||||
|
||||
func (ep *endpoint) DriverInfo() (map[string]interface{}, error) {
|
||||
ep, err := ep.retrieveFromStore()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if sb, ok := ep.getSandbox(); ok {
|
||||
if gwep := sb.getEndpointInGWNetwork(); gwep != nil && gwep.ID() != ep.ID() {
|
||||
return gwep.DriverInfo()
|
||||
}
|
||||
}
|
||||
|
||||
n, err := ep.getNetworkFromStore()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not find network in store for driver info: %v", err)
|
||||
}
|
||||
|
||||
driver, err := n.driver(true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get driver info: %v", err)
|
||||
}
|
||||
|
||||
return driver.EndpointOperInfo(n.ID(), ep.ID())
|
||||
}
|
45
vendor/github.com/docker/libnetwork/endpoint_info_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,45 @@
|
|||
// +build windows
|
||||
|
||||
package libnetwork
|
||||
|
||||
import "fmt"
|
||||
|
||||
func (ep *endpoint) DriverInfo() (map[string]interface{}, error) {
|
||||
ep, err := ep.retrieveFromStore()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var gwDriverInfo map[string]interface{}
|
||||
if sb, ok := ep.getSandbox(); ok {
|
||||
if gwep := sb.getEndpointInGWNetwork(); gwep != nil && gwep.ID() != ep.ID() {
|
||||
|
||||
gwDriverInfo, err = gwep.DriverInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
n, err := ep.getNetworkFromStore()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not find network in store for driver info: %v", err)
|
||||
}
|
||||
|
||||
driver, err := n.driver(true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get driver info: %v", err)
|
||||
}
|
||||
|
||||
epInfo, err := driver.EndpointOperInfo(n.ID(), ep.ID())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if epInfo != nil {
|
||||
epInfo["GW_INFO"] = gwDriverInfo
|
||||
return epInfo, nil
|
||||
}
|
||||
|
||||
return gwDriverInfo, nil
|
||||
}
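The Windows variant above does not simply delegate to the gateway endpoint the way the Unix variant does; it keeps the endpoint's own driver info and nests the gateway endpoint's data under a "GW_INFO" key. A minimal sketch of how a caller might read the merged map; the helper name is hypothetical and only the public Endpoint.DriverInfo method is assumed:

package example

import (
	"fmt"

	"github.com/docker/libnetwork"
)

// printDriverInfo is a hypothetical helper, not part of this change; it shows
// how the merged map produced by the Windows DriverInfo could be consumed.
func printDriverInfo(ep libnetwork.Endpoint) error {
	info, err := ep.DriverInfo()
	if err != nil {
		return err
	}
	// On Windows the gateway endpoint's operational data, if present, sits
	// under the "GW_INFO" key instead of replacing the endpoint's own info.
	if gw, ok := info["GW_INFO"].(map[string]interface{}); ok {
		fmt.Printf("gateway endpoint info: %v\n", gw)
	}
	fmt.Printf("endpoint info: %v\n", info)
	return nil
}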
|
4 vendor/github.com/docker/libnetwork/hostdiscovery/hostdiscovery.go (generated, vendored)
@@ -4,7 +4,7 @@ import (
|
|||
"net"
|
||||
"sync"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
|
||||
mapset "github.com/deckarep/golang-set"
|
||||
"github.com/docker/docker/pkg/discovery"
|
||||
|
@ -54,7 +54,7 @@ func (h *hostDiscovery) monitorDiscovery(ch <-chan discovery.Entries, errCh <-ch
|
|||
h.processCallback(entries, activeCallback, joinCallback, leaveCallback)
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
log.Errorf("discovery error: %v", err)
|
||||
logrus.Errorf("discovery error: %v", err)
|
||||
}
|
||||
case <-h.stopChan:
|
||||
return
|
||||
|
|
13 vendor/github.com/docker/libnetwork/idm/idm.go (generated, vendored)
@@ -54,6 +54,19 @@ func (i *Idm) GetSpecificID(id uint64) error {
|
|||
return i.handle.Set(id - i.start)
|
||||
}
|
||||
|
||||
// GetIDInRange returns the first available id in the set within a range
|
||||
func (i *Idm) GetIDInRange(start, end uint64) (uint64, error) {
|
||||
if i.handle == nil {
|
||||
return 0, fmt.Errorf("ID set is not initialized")
|
||||
}
|
||||
|
||||
if start < i.start || end > i.end {
|
||||
return 0, fmt.Errorf("Requested range does not belong to the set")
|
||||
}
|
||||
|
||||
return i.handle.SetAnyInRange(start, end-start)
|
||||
}
|
||||
|
||||
// Release releases the specified id
|
||||
func (i *Idm) Release(id uint64) {
|
||||
i.handle.Unset(id - i.start)
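The new GetIDInRange above restricts allocation to a sub-range of an existing ID set. A small, hedged usage sketch: the set name and bounds are arbitrary example values, no datastore is used (nil), and a zero-based set is chosen to keep the range arithmetic obvious.

package main

import (
	"fmt"
	"log"

	"github.com/docker/libnetwork/idm"
)

func main() {
	// Zero-based set of 1024 ids, backed by no datastore.
	set, err := idm.New(nil, "example-ids", 0, 1023)
	if err != nil {
		log.Fatal(err)
	}

	// Ask for the first free id constrained to a sub-range of the set.
	id, err := set.GetIDInRange(100, 200)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("allocated id:", id)

	// Return the id to the set when it is no longer needed.
	set.Release(id)
}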
|
||||
|
|
18 vendor/github.com/docker/libnetwork/ipam/allocator.go (generated, vendored)
@@ -6,7 +6,7 @@ import (
|
|||
"sort"
|
||||
"sync"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/bitseq"
|
||||
"github.com/docker/libnetwork/datastore"
|
||||
"github.com/docker/libnetwork/discoverapi"
|
||||
|
@ -135,7 +135,7 @@ func (a *Allocator) checkConsistency(as string) {
|
|||
bm := a.addresses[sk]
|
||||
a.Unlock()
|
||||
if err := bm.CheckConsistency(); err != nil {
|
||||
log.Warnf("Error while running consistency check for %s: %v", sk, err)
|
||||
logrus.Warnf("Error while running consistency check for %s: %v", sk, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -198,7 +198,7 @@ func (a *Allocator) GetDefaultAddressSpaces() (string, string, error) {
|
|||
|
||||
// RequestPool returns an address pool along with its unique id.
|
||||
func (a *Allocator) RequestPool(addressSpace, pool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) {
|
||||
log.Debugf("RequestPool(%s, %s, %s, %v, %t)", addressSpace, pool, subPool, options, v6)
|
||||
logrus.Debugf("RequestPool(%s, %s, %s, %v, %t)", addressSpace, pool, subPool, options, v6)
|
||||
|
||||
k, nw, ipr, err := a.parsePoolRequest(addressSpace, pool, subPool, v6)
|
||||
if err != nil {
|
||||
|
@ -227,7 +227,7 @@ retry:
|
|||
insert, err := aSpace.updatePoolDBOnAdd(*k, nw, ipr, pdf)
|
||||
if err != nil {
|
||||
if _, ok := err.(types.MaskableError); ok {
|
||||
log.Debugf("Retrying predefined pool search: %v", err)
|
||||
logrus.Debugf("Retrying predefined pool search: %v", err)
|
||||
goto retry
|
||||
}
|
||||
return "", nil, nil, err
|
||||
|
@ -246,7 +246,7 @@ retry:
|
|||
|
||||
// ReleasePool releases the address pool identified by the passed id
|
||||
func (a *Allocator) ReleasePool(poolID string) error {
|
||||
log.Debugf("ReleasePool(%s)", poolID)
|
||||
logrus.Debugf("ReleasePool(%s)", poolID)
|
||||
k := SubnetKey{}
|
||||
if err := k.FromString(poolID); err != nil {
|
||||
return types.BadRequestErrorf("invalid pool id: %s", poolID)
|
||||
|
@ -322,7 +322,7 @@ func (a *Allocator) parsePoolRequest(addressSpace, pool, subPool string, v6 bool
|
|||
}
|
||||
|
||||
func (a *Allocator) insertBitMask(key SubnetKey, pool *net.IPNet) error {
|
||||
//log.Debugf("Inserting bitmask (%s, %s)", key.String(), pool.String())
|
||||
//logrus.Debugf("Inserting bitmask (%s, %s)", key.String(), pool.String())
|
||||
|
||||
store := a.getStore(key.AddressSpace)
|
||||
ipVer := getAddressVersion(pool.IP)
|
||||
|
@ -360,7 +360,7 @@ func (a *Allocator) retrieveBitmask(k SubnetKey, n *net.IPNet) (*bitseq.Handle,
|
|||
bm, ok := a.addresses[k]
|
||||
a.Unlock()
|
||||
if !ok {
|
||||
log.Debugf("Retrieving bitmask (%s, %s)", k.String(), n.String())
|
||||
logrus.Debugf("Retrieving bitmask (%s, %s)", k.String(), n.String())
|
||||
if err := a.insertBitMask(k, n); err != nil {
|
||||
return nil, types.InternalErrorf("could not find bitmask in datastore for %s", k.String())
|
||||
}
|
||||
|
@ -418,7 +418,7 @@ func (a *Allocator) getPredefinedPool(as string, ipV6 bool) (*net.IPNet, error)
|
|||
|
||||
// RequestAddress returns an address from the specified pool ID
|
||||
func (a *Allocator) RequestAddress(poolID string, prefAddress net.IP, opts map[string]string) (*net.IPNet, map[string]string, error) {
|
||||
log.Debugf("RequestAddress(%s, %v, %v)", poolID, prefAddress, opts)
|
||||
logrus.Debugf("RequestAddress(%s, %v, %v)", poolID, prefAddress, opts)
|
||||
k := SubnetKey{}
|
||||
if err := k.FromString(poolID); err != nil {
|
||||
return nil, nil, types.BadRequestErrorf("invalid pool id: %s", poolID)
|
||||
|
@ -467,7 +467,7 @@ func (a *Allocator) RequestAddress(poolID string, prefAddress net.IP, opts map[s
|
|||
|
||||
// ReleaseAddress releases the address from the specified pool ID
|
||||
func (a *Allocator) ReleaseAddress(poolID string, address net.IP) error {
|
||||
log.Debugf("ReleaseAddress(%s, %v)", poolID, address)
|
||||
logrus.Debugf("ReleaseAddress(%s, %v)", poolID, address)
|
||||
k := SubnetKey{}
|
||||
if err := k.FromString(poolID); err != nil {
|
||||
return types.BadRequestErrorf("invalid pool id: %s", poolID)
|
||||
|
|
4 vendor/github.com/docker/libnetwork/ipam/store.go (generated, vendored)
@@ -3,7 +3,7 @@ package ipam
|
|||
import (
|
||||
"encoding/json"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/datastore"
|
||||
"github.com/docker/libnetwork/types"
|
||||
)
|
||||
|
@ -26,7 +26,7 @@ func (aSpace *addrSpace) KeyPrefix() []string {
|
|||
func (aSpace *addrSpace) Value() []byte {
|
||||
b, err := json.Marshal(aSpace)
|
||||
if err != nil {
|
||||
log.Warnf("Failed to marshal ipam configured pools: %v", err)
|
||||
logrus.Warnf("Failed to marshal ipam configured pools: %v", err)
|
||||
return nil
|
||||
}
|
||||
return b
|
||||
|
|
43 vendor/github.com/docker/libnetwork/ipams/builtin/builtin_windows.go (generated, vendored)
@@ -3,14 +3,55 @@
|
|||
package builtin
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/libnetwork/datastore"
|
||||
"github.com/docker/libnetwork/ipam"
|
||||
"github.com/docker/libnetwork/ipamapi"
|
||||
"github.com/docker/libnetwork/ipamutils"
|
||||
|
||||
windowsipam "github.com/docker/libnetwork/ipams/windowsipam"
|
||||
)
|
||||
|
||||
// InitDockerDefault registers the built-in ipam service with libnetwork
|
||||
func InitDockerDefault(ic ipamapi.Callback, l, g interface{}) error {
|
||||
var (
|
||||
ok bool
|
||||
localDs, globalDs datastore.DataStore
|
||||
)
|
||||
|
||||
if l != nil {
|
||||
if localDs, ok = l.(datastore.DataStore); !ok {
|
||||
return fmt.Errorf("incorrect local datastore passed to built-in ipam init")
|
||||
}
|
||||
}
|
||||
|
||||
if g != nil {
|
||||
if globalDs, ok = g.(datastore.DataStore); !ok {
|
||||
return fmt.Errorf("incorrect global datastore passed to built-in ipam init")
|
||||
}
|
||||
}
|
||||
|
||||
ipamutils.InitNetworks()
|
||||
|
||||
a, err := ipam.NewAllocator(localDs, globalDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cps := &ipamapi.Capability{RequiresRequestReplay: true}
|
||||
|
||||
return ic.RegisterIpamDriverWithCapabilities(ipamapi.DefaultIPAM, a, cps)
|
||||
}
|
||||
|
||||
// Init registers the built-in ipam service with libnetwork
|
||||
func Init(ic ipamapi.Callback, l, g interface{}) error {
|
||||
initFunc := windowsipam.GetInit(ipamapi.DefaultIPAM)
|
||||
initFunc := windowsipam.GetInit(windowsipam.DefaultIPAM)
|
||||
|
||||
err := InitDockerDefault(ic, l, g)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return initFunc(ic, l, g)
|
||||
}
|
||||
|
|
10 vendor/github.com/docker/libnetwork/ipams/remote/remote.go (generated, vendored)
@@ -4,7 +4,7 @@ import (
|
|||
"fmt"
|
||||
"net"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/pkg/plugins"
|
||||
"github.com/docker/libnetwork/discoverapi"
|
||||
"github.com/docker/libnetwork/ipamapi"
|
||||
|
@ -40,13 +40,13 @@ func Init(cb ipamapi.Callback, l, g interface{}) error {
|
|||
a := newAllocator(name, client)
|
||||
if cps, err := a.(*allocator).getCapabilities(); err == nil {
|
||||
if err := cb.RegisterIpamDriverWithCapabilities(name, a, cps); err != nil {
|
||||
log.Errorf("error registering remote ipam driver %s due to %v", name, err)
|
||||
logrus.Errorf("error registering remote ipam driver %s due to %v", name, err)
|
||||
}
|
||||
} else {
|
||||
log.Infof("remote ipam driver %s does not support capabilities", name)
|
||||
log.Debug(err)
|
||||
logrus.Infof("remote ipam driver %s does not support capabilities", name)
|
||||
logrus.Debug(err)
|
||||
if err := cb.RegisterIpamDriver(name, a); err != nil {
|
||||
log.Errorf("error registering remote ipam driver %s due to %v", name, err)
|
||||
logrus.Errorf("error registering remote ipam driver %s due to %v", name, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
|
13
vendor/github.com/docker/libnetwork/ipams/windowsipam/windowsipam.go
generated
vendored
13
vendor/github.com/docker/libnetwork/ipams/windowsipam/windowsipam.go
generated
vendored
|
@ -3,7 +3,7 @@ package windowsipam
|
|||
import (
|
||||
"net"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/discoverapi"
|
||||
"github.com/docker/libnetwork/ipamapi"
|
||||
"github.com/docker/libnetwork/netlabel"
|
||||
|
@ -15,6 +15,9 @@ const (
|
|||
globalAddressSpace = "GlobalDefault"
|
||||
)
|
||||
|
||||
// DefaultIPAM defines the default ipam-driver for local-scoped windows networks
|
||||
const DefaultIPAM = "windows"
|
||||
|
||||
var (
|
||||
defaultPool, _ = types.ParseCIDR("0.0.0.0/0")
|
||||
)
|
||||
|
@ -36,7 +39,7 @@ func (a *allocator) GetDefaultAddressSpaces() (string, string, error) {
|
|||
// RequestPool returns an address pool along with its unique id. This is a null ipam driver. It allocates the
|
||||
// subnet user asked and does not validate anything. Doesn't support subpool allocation
|
||||
func (a *allocator) RequestPool(addressSpace, pool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) {
|
||||
log.Debugf("RequestPool(%s, %s, %s, %v, %t)", addressSpace, pool, subPool, options, v6)
|
||||
logrus.Debugf("RequestPool(%s, %s, %s, %v, %t)", addressSpace, pool, subPool, options, v6)
|
||||
if subPool != "" || v6 {
|
||||
return "", nil, nil, types.InternalErrorf("This request is not supported by null ipam driver")
|
||||
}
|
||||
|
@ -58,14 +61,14 @@ func (a *allocator) RequestPool(addressSpace, pool, subPool string, options map[
|
|||
|
||||
// ReleasePool releases the address pool - always succeeds
|
||||
func (a *allocator) ReleasePool(poolID string) error {
|
||||
log.Debugf("ReleasePool(%s)", poolID)
|
||||
logrus.Debugf("ReleasePool(%s)", poolID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RequestAddress returns an address from the specified pool ID.
|
||||
// Always allocate the 0.0.0.0/32 ip if no preferred address was specified
|
||||
func (a *allocator) RequestAddress(poolID string, prefAddress net.IP, opts map[string]string) (*net.IPNet, map[string]string, error) {
|
||||
log.Debugf("RequestAddress(%s, %v, %v)", poolID, prefAddress, opts)
|
||||
logrus.Debugf("RequestAddress(%s, %v, %v)", poolID, prefAddress, opts)
|
||||
_, ipNet, err := net.ParseCIDR(poolID)
|
||||
|
||||
if err != nil {
|
||||
|
@ -85,7 +88,7 @@ func (a *allocator) RequestAddress(poolID string, prefAddress net.IP, opts map[s
|
|||
|
||||
// ReleaseAddress releases the address - always succeeds
|
||||
func (a *allocator) ReleaseAddress(poolID string, address net.IP) error {
|
||||
log.Debugf("ReleaseAddress(%s, %v)", poolID, address)
|
||||
logrus.Debugf("ReleaseAddress(%s, %v)", poolID, address)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
2 vendor/github.com/docker/libnetwork/ipvs/netlink.go (generated, vendored)
@@ -64,7 +64,7 @@ func setup() {
|
|||
|
||||
ipvsFamily, err = getIPVSFamily()
|
||||
if err != nil {
|
||||
logrus.Errorf("Could not get ipvs family information from the kernel. It is possible that ipvs is not enabled in your kernel. Native loadbalancing will not work until this is fixed.")
|
||||
logrus.Error("Could not get ipvs family information from the kernel. It is possible that ipvs is not enabled in your kernel. Native loadbalancing will not work until this is fixed.")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
4
vendor/github.com/docker/libnetwork/netutils/utils_windows.go
generated
vendored
4
vendor/github.com/docker/libnetwork/netutils/utils_windows.go
generated
vendored
|
@ -18,6 +18,8 @@ func ElectInterfaceAddresses(name string) ([]*net.IPNet, []*net.IPNet, error) {
|
|||
|
||||
// FindAvailableNetwork returns a network from the passed list which does not
|
||||
// overlap with existing interfaces in the system
|
||||
|
||||
// TODO : Use appropriate windows APIs to identify non-overlapping subnets
|
||||
func FindAvailableNetwork(list []*net.IPNet) (*net.IPNet, error) {
|
||||
return nil, types.NotImplementedErrorf("not supported on windows")
|
||||
return nil, nil
|
||||
}
|
||||
|
|
45
vendor/github.com/docker/libnetwork/network.go
generated
vendored
45
vendor/github.com/docker/libnetwork/network.go
generated
vendored
|
@ -8,7 +8,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/libnetwork/config"
|
||||
"github.com/docker/libnetwork/datastore"
|
||||
|
@ -473,7 +473,7 @@ func (n *network) UnmarshalJSON(b []byte) (err error) {
|
|||
if v, ok := netMap["created"]; ok {
|
||||
// n.created is time.Time but marshalled as string
|
||||
if err = n.created.UnmarshalText([]byte(v.(string))); err != nil {
|
||||
log.Warnf("failed to unmarshal creation time %v: %v", v, err)
|
||||
logrus.Warnf("failed to unmarshal creation time %v: %v", v, err)
|
||||
n.created = time.Time{}
|
||||
}
|
||||
}
|
||||
|
@ -633,6 +633,9 @@ func NetworkOptionIpam(ipamDriver string, addrSpace string, ipV4 []*IpamConf, ip
|
|||
return func(n *network) {
|
||||
if ipamDriver != "" {
|
||||
n.ipamType = ipamDriver
|
||||
if ipamDriver == ipamapi.DefaultIPAM {
|
||||
n.ipamType = defaultIpamForNetworkType(n.Type())
|
||||
}
|
||||
}
|
||||
n.ipamOptions = opts
|
||||
n.addrSpace = addrSpace
|
||||
|
@ -776,12 +779,12 @@ func (n *network) delete(force bool) error {
|
|||
if !force {
|
||||
return err
|
||||
}
|
||||
log.Debugf("driver failed to delete stale network %s (%s): %v", n.Name(), n.ID(), err)
|
||||
logrus.Debugf("driver failed to delete stale network %s (%s): %v", n.Name(), n.ID(), err)
|
||||
}
|
||||
|
||||
n.ipamRelease()
|
||||
if err = c.updateToStore(n); err != nil {
|
||||
log.Warnf("Failed to update store after ipam release for network %s (%s): %v", n.Name(), n.ID(), err)
|
||||
logrus.Warnf("Failed to update store after ipam release for network %s (%s): %v", n.Name(), n.ID(), err)
|
||||
}
|
||||
|
||||
// We are about to delete the network. Leave the gossip
|
||||
|
@ -792,7 +795,7 @@ func (n *network) delete(force bool) error {
|
|||
// bindings cleanup requires the network in the store.
|
||||
n.cancelDriverWatches()
|
||||
if err = n.leaveCluster(); err != nil {
|
||||
log.Errorf("Failed leaving network %s from the agent cluster: %v", n.Name(), err)
|
||||
logrus.Errorf("Failed leaving network %s from the agent cluster: %v", n.Name(), err)
|
||||
}
|
||||
|
||||
c.cleanupServiceBindings(n.ID())
|
||||
|
@ -804,7 +807,7 @@ func (n *network) delete(force bool) error {
|
|||
if !force {
|
||||
return fmt.Errorf("error deleting network endpoint count from store: %v", err)
|
||||
}
|
||||
log.Debugf("Error deleting endpoint count from store for stale network %s (%s) for deletion: %v", n.Name(), n.ID(), err)
|
||||
logrus.Debugf("Error deleting endpoint count from store for stale network %s (%s) for deletion: %v", n.Name(), n.ID(), err)
|
||||
}
|
||||
|
||||
if err = c.deleteFromStore(n); err != nil {
|
||||
|
@ -827,7 +830,7 @@ func (n *network) deleteNetwork() error {
|
|||
}
|
||||
|
||||
if _, ok := err.(types.MaskableError); !ok {
|
||||
log.Warnf("driver error deleting network %s : %v", n.name, err)
|
||||
logrus.Warnf("driver error deleting network %s : %v", n.name, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -920,7 +923,7 @@ func (n *network) CreateEndpoint(name string, options ...EndpointOption) (Endpoi
|
|||
defer func() {
|
||||
if err != nil {
|
||||
if e := ep.deleteEndpoint(false); e != nil {
|
||||
log.Warnf("cleaning up endpoint failed %s : %v", name, e)
|
||||
logrus.Warnf("cleaning up endpoint failed %s : %v", name, e)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -935,7 +938,7 @@ func (n *network) CreateEndpoint(name string, options ...EndpointOption) (Endpoi
|
|||
defer func() {
|
||||
if err != nil {
|
||||
if e := n.getController().deleteFromStore(ep); e != nil {
|
||||
log.Warnf("error rolling back endpoint %s from store: %v", name, e)
|
||||
logrus.Warnf("error rolling back endpoint %s from store: %v", name, e)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -961,7 +964,7 @@ func (n *network) Endpoints() []Endpoint {
|
|||
|
||||
endpoints, err := n.getEndpointsFromStore()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
logrus.Error(err)
|
||||
}
|
||||
|
||||
for _, ep := range endpoints {
|
||||
|
@ -1169,7 +1172,7 @@ func (n *network) getSvcRecords(ep *endpoint) []etchosts.Record {
|
|||
continue
|
||||
}
|
||||
if len(ip) == 0 {
|
||||
log.Warnf("Found empty list of IP addresses for service %s on network %s (%s)", h, n.name, n.id)
|
||||
logrus.Warnf("Found empty list of IP addresses for service %s on network %s (%s)", h, n.name, n.id)
|
||||
continue
|
||||
}
|
||||
recs = append(recs, etchosts.Record{
|
||||
|
@ -1253,7 +1256,7 @@ func (n *network) requestPoolHelper(ipam ipamapi.Ipam, addressSpace, preferredPo
|
|||
// pools.
|
||||
defer func() {
|
||||
if err := ipam.ReleasePool(poolID); err != nil {
|
||||
log.Warnf("Failed to release overlapping pool %s while returning from pool request helper for network %s", pool, n.Name())
|
||||
logrus.Warnf("Failed to release overlapping pool %s while returning from pool request helper for network %s", pool, n.Name())
|
||||
}
|
||||
}()
|
||||
|
||||
|
@ -1294,7 +1297,7 @@ func (n *network) ipamAllocateVersion(ipVer int, ipam ipamapi.Ipam) error {
|
|||
|
||||
*infoList = make([]*IpamInfo, len(*cfgList))
|
||||
|
||||
log.Debugf("Allocating IPv%d pools for network %s (%s)", ipVer, n.Name(), n.ID())
|
||||
logrus.Debugf("Allocating IPv%d pools for network %s (%s)", ipVer, n.Name(), n.ID())
|
||||
|
||||
for i, cfg := range *cfgList {
|
||||
if err = cfg.Validate(); err != nil {
|
||||
|
@ -1312,7 +1315,7 @@ func (n *network) ipamAllocateVersion(ipVer int, ipam ipamapi.Ipam) error {
|
|||
defer func() {
|
||||
if err != nil {
|
||||
if err := ipam.ReleasePool(d.PoolID); err != nil {
|
||||
log.Warnf("Failed to release address pool %s after failure to create network %s (%s)", d.PoolID, n.Name(), n.ID())
|
||||
logrus.Warnf("Failed to release address pool %s after failure to create network %s (%s)", d.PoolID, n.Name(), n.ID())
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -1364,7 +1367,7 @@ func (n *network) ipamRelease() {
|
|||
}
|
||||
ipam, _, err := n.getController().getIPAMDriver(n.ipamType)
|
||||
if err != nil {
|
||||
log.Warnf("Failed to retrieve ipam driver to release address pool(s) on delete of network %s (%s): %v", n.Name(), n.ID(), err)
|
||||
logrus.Warnf("Failed to retrieve ipam driver to release address pool(s) on delete of network %s (%s): %v", n.Name(), n.ID(), err)
|
||||
return
|
||||
}
|
||||
n.ipamReleaseVersion(4, ipam)
|
||||
|
@ -1380,7 +1383,7 @@ func (n *network) ipamReleaseVersion(ipVer int, ipam ipamapi.Ipam) {
|
|||
case 6:
|
||||
infoList = &n.ipamV6Info
|
||||
default:
|
||||
log.Warnf("incorrect ip version passed to ipam release: %d", ipVer)
|
||||
logrus.Warnf("incorrect ip version passed to ipam release: %d", ipVer)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1388,25 +1391,25 @@ func (n *network) ipamReleaseVersion(ipVer int, ipam ipamapi.Ipam) {
|
|||
return
|
||||
}
|
||||
|
||||
log.Debugf("releasing IPv%d pools from network %s (%s)", ipVer, n.Name(), n.ID())
|
||||
logrus.Debugf("releasing IPv%d pools from network %s (%s)", ipVer, n.Name(), n.ID())
|
||||
|
||||
for _, d := range *infoList {
|
||||
if d.Gateway != nil {
|
||||
if err := ipam.ReleaseAddress(d.PoolID, d.Gateway.IP); err != nil {
|
||||
log.Warnf("Failed to release gateway ip address %s on delete of network %s (%s): %v", d.Gateway.IP, n.Name(), n.ID(), err)
|
||||
logrus.Warnf("Failed to release gateway ip address %s on delete of network %s (%s): %v", d.Gateway.IP, n.Name(), n.ID(), err)
|
||||
}
|
||||
}
|
||||
if d.IPAMData.AuxAddresses != nil {
|
||||
for k, nw := range d.IPAMData.AuxAddresses {
|
||||
if d.Pool.Contains(nw.IP) {
|
||||
if err := ipam.ReleaseAddress(d.PoolID, nw.IP); err != nil && err != ipamapi.ErrIPOutOfRange {
|
||||
log.Warnf("Failed to release secondary ip address %s (%v) on delete of network %s (%s): %v", k, nw.IP, n.Name(), n.ID(), err)
|
||||
logrus.Warnf("Failed to release secondary ip address %s (%v) on delete of network %s (%s): %v", k, nw.IP, n.Name(), n.ID(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := ipam.ReleasePool(d.PoolID); err != nil {
|
||||
log.Warnf("Failed to release address pool %s on delete of network %s (%s): %v", d.PoolID, n.Name(), n.ID(), err)
|
||||
logrus.Warnf("Failed to release address pool %s on delete of network %s (%s): %v", d.PoolID, n.Name(), n.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1658,7 +1661,7 @@ func (n *network) ResolveService(name string) ([]*net.SRV, []net.IP) {
|
|||
srv := []*net.SRV{}
|
||||
ip := []net.IP{}
|
||||
|
||||
log.Debugf("Service name To resolve: %v", name)
|
||||
logrus.Debugf("Service name To resolve: %v", name)
|
||||
|
||||
// There are DNS implementaions that allow SRV queries for names not in
|
||||
// the format defined by RFC 2782. Hence specific validations checks are
|
||||
|
|
6 vendor/github.com/docker/libnetwork/network_unix.go (generated, vendored)
@@ -2,7 +2,13 @@
|
|||
|
||||
package libnetwork
|
||||
|
||||
import "github.com/docker/libnetwork/ipamapi"
|
||||
|
||||
// Stub implementations for DNS related functions
|
||||
|
||||
func (n *network) startResolver() {
|
||||
}
|
||||
|
||||
func defaultIpamForNetworkType(networkType string) string {
|
||||
return ipamapi.DefaultIPAM
|
||||
}
|
||||
|
|
38
vendor/github.com/docker/libnetwork/network_windows.go
generated
vendored
38
vendor/github.com/docker/libnetwork/network_windows.go
generated
vendored
|
@ -4,17 +4,20 @@ package libnetwork
|
|||
|
||||
import (
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/drivers/windows"
|
||||
"github.com/docker/libnetwork/ipamapi"
|
||||
"github.com/docker/libnetwork/ipams/windowsipam"
|
||||
)
|
||||
|
||||
func executeInCompartment(compartmentID uint32, x func()) {
|
||||
runtime.LockOSThread()
|
||||
|
||||
if err := hcsshim.SetCurrentThreadCompartmentId(compartmentID); err != nil {
|
||||
log.Error(err)
|
||||
logrus.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
hcsshim.SetCurrentThreadCompartmentId(0)
|
||||
|
@ -26,7 +29,7 @@ func executeInCompartment(compartmentID uint32, x func()) {
|
|||
|
||||
func (n *network) startResolver() {
|
||||
n.resolverOnce.Do(func() {
|
||||
log.Debugf("Launching DNS server for network", n.Name())
|
||||
logrus.Debugf("Launching DNS server for network", n.Name())
|
||||
options := n.Info().DriverOptions()
|
||||
hnsid := options[windows.HNSID]
|
||||
|
||||
|
@ -36,21 +39,34 @@ func (n *network) startResolver() {
|
|||
|
||||
hnsresponse, err := hcsshim.HNSNetworkRequest("GET", hnsid, "")
|
||||
if err != nil {
|
||||
log.Errorf("Resolver Setup/Start failed for container %s, %q", n.Name(), err)
|
||||
logrus.Errorf("Resolver Setup/Start failed for container %s, %q", n.Name(), err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, subnet := range hnsresponse.Subnets {
|
||||
if subnet.GatewayAddress != "" {
|
||||
resolver := NewResolver(subnet.GatewayAddress, false, "", n)
|
||||
log.Debugf("Binding a resolver on network %s gateway %s", n.Name(), subnet.GatewayAddress)
|
||||
executeInCompartment(hnsresponse.DNSServerCompartment, resolver.SetupFunc(53))
|
||||
if err = resolver.Start(); err != nil {
|
||||
log.Errorf("Resolver Setup/Start failed for container %s, %q", n.Name(), err)
|
||||
} else {
|
||||
n.resolver = append(n.resolver, resolver)
|
||||
for i := 0; i < 3; i++ {
|
||||
resolver := NewResolver(subnet.GatewayAddress, false, "", n)
|
||||
logrus.Debugf("Binding a resolver on network %s gateway %s", n.Name(), subnet.GatewayAddress)
|
||||
executeInCompartment(hnsresponse.DNSServerCompartment, resolver.SetupFunc(53))
|
||||
|
||||
if err = resolver.Start(); err != nil {
|
||||
logrus.Errorf("Resolver Setup/Start failed for container %s, %q", n.Name(), err)
|
||||
time.Sleep(1 * time.Second)
|
||||
} else {
|
||||
logrus.Debugf("Resolver bound successfuly for network %s", n.Name())
|
||||
n.resolver = append(n.resolver, resolver)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func defaultIpamForNetworkType(networkType string) string {
|
||||
if windows.IsBuiltinLocalDriver(networkType) {
|
||||
return windowsipam.DefaultIPAM
|
||||
}
|
||||
return ipamapi.DefaultIPAM
|
||||
}
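The Windows resolver start above now retries binding up to three times with a one-second pause instead of giving up on the first failure. A generic sketch of that retry-with-fixed-delay pattern, detached from libnetwork; the helper and its parameters are illustrative only:

package main

import (
	"fmt"
	"time"
)

// retry attempts op up to attempts times, sleeping between failed attempts,
// loosely mirroring the resolver start loop above.
func retry(attempts int, delay time.Duration, op func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil {
			return nil
		}
		time.Sleep(delay)
	}
	return fmt.Errorf("giving up after %d attempts: %v", attempts, err)
}

func main() {
	calls := 0
	err := retry(3, 100*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("transient failure %d", calls)
		}
		return nil
	})
	fmt.Println("result:", err, "after", calls, "calls")
}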
|
||||
|
|
3 vendor/github.com/docker/libnetwork/networkdb/cluster.go (generated, vendored)
@@ -139,7 +139,6 @@ func (nDB *NetworkDB) clusterInit() error {
|
|||
|
||||
nDB.stopCh = make(chan struct{})
|
||||
nDB.memberlist = mlist
|
||||
nDB.mConfig = config
|
||||
|
||||
for _, trigger := range []struct {
|
||||
interval time.Duration
|
||||
|
@ -242,7 +241,7 @@ func (nDB *NetworkDB) reapDeadNode() {
|
|||
defer nDB.Unlock()
|
||||
for id, n := range nDB.failedNodes {
|
||||
if n.reapTime > 0 {
|
||||
n.reapTime -= reapPeriod
|
||||
n.reapTime -= nodeReapPeriod
|
||||
continue
|
||||
}
|
||||
logrus.Debugf("Removing failed node %v from gossip cluster", n.Name)
|
||||
|
|
5 vendor/github.com/docker/libnetwork/networkdb/networkdb.go (generated, vendored)
@@ -29,10 +29,6 @@ type NetworkDB struct {
|
|||
// NetworkDB configuration.
|
||||
config *Config
|
||||
|
||||
// local copy of memberlist config that we use to driver
|
||||
// network scoped gossip and bulk sync.
|
||||
mConfig *memberlist.Config
|
||||
|
||||
// All the tree index (byTable, byNetwork) that we maintain
|
||||
// the db.
|
||||
indexes map[int]*radix.Tree
|
||||
|
@ -57,7 +53,6 @@ type NetworkDB struct {
|
|||
|
||||
// A map of nodes which are participating in a given
|
||||
// network. The key is a network ID.
|
||||
|
||||
networkNodes map[string][]string
|
||||
|
||||
// A table of ack channels for every node from which we are
|
||||
|
|
8 vendor/github.com/docker/libnetwork/ns/init_linux.go (generated, vendored)
@@ -8,7 +8,7 @@ import (
|
|||
"sync"
|
||||
"syscall"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/vishvananda/netlink"
|
||||
"github.com/vishvananda/netns"
|
||||
)
|
||||
|
@ -24,11 +24,11 @@ func Init() {
|
|||
var err error
|
||||
initNs, err = netns.Get()
|
||||
if err != nil {
|
||||
log.Errorf("could not get initial namespace: %v", err)
|
||||
logrus.Errorf("could not get initial namespace: %v", err)
|
||||
}
|
||||
initNl, err = netlink.NewHandle(getSupportedNlFamilies()...)
|
||||
if err != nil {
|
||||
log.Errorf("could not create netlink handle on initial namespace: %v", err)
|
||||
logrus.Errorf("could not create netlink handle on initial namespace: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -70,7 +70,7 @@ func getSupportedNlFamilies() []int {
|
|||
fams := []int{syscall.NETLINK_ROUTE}
|
||||
if err := loadXfrmModules(); err != nil {
|
||||
if checkXfrmSocket() != nil {
|
||||
log.Warnf("Could not load necessary modules for IPSEC rules: %v", err)
|
||||
logrus.Warnf("Could not load necessary modules for IPSEC rules: %v", err)
|
||||
return fams
|
||||
}
|
||||
}
|
||||
|
|
31
vendor/github.com/docker/libnetwork/osl/interface_linux.go
generated
vendored
31
vendor/github.com/docker/libnetwork/osl/interface_linux.go
generated
vendored
|
@ -8,7 +8,7 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/ns"
|
||||
"github.com/docker/libnetwork/types"
|
||||
"github.com/vishvananda/netlink"
|
||||
|
@ -167,7 +167,7 @@ func (i *nwIface) Remove() error {
|
|||
|
||||
err = nlh.LinkSetName(iface, i.SrcName())
|
||||
if err != nil {
|
||||
log.Debugf("LinkSetName failed for interface %s: %v", i.SrcName(), err)
|
||||
logrus.Debugf("LinkSetName failed for interface %s: %v", i.SrcName(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -179,7 +179,7 @@ func (i *nwIface) Remove() error {
|
|||
} else if !isDefault {
|
||||
// Move the network interface to caller namespace.
|
||||
if err := nlh.LinkSetNsFd(iface, ns.ParseHandlerInt()); err != nil {
|
||||
log.Debugf("LinkSetNsPid failed for interface %s: %v", i.SrcName(), err)
|
||||
logrus.Debugf("LinkSetNsPid failed for interface %s: %v", i.SrcName(), err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -315,7 +315,7 @@ func (n *networkNamespace) AddInterface(srcName, dstPrefix string, options ...If
|
|||
// Up the interface.
|
||||
cnt := 0
|
||||
for err = nlh.LinkSetUp(iface); err != nil && cnt < 3; cnt++ {
|
||||
log.Debugf("retrying link setup because of: %v", err)
|
||||
logrus.Debugf("retrying link setup because of: %v", err)
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
err = nlh.LinkSetUp(iface)
|
||||
}
|
||||
|
@ -377,7 +377,9 @@ func setInterfaceIP(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error {
|
|||
if i.Address() == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := checkRouteConflict(nlh, i.Address(), netlink.FAMILY_V4); err != nil {
|
||||
return err
|
||||
}
|
||||
ipAddr := &netlink.Addr{IPNet: i.Address(), Label: ""}
|
||||
return nlh.AddrAdd(iface, ipAddr)
|
||||
}
|
||||
|
@ -386,6 +388,9 @@ func setInterfaceIPv6(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error
|
|||
if i.AddressIPv6() == nil {
|
||||
return nil
|
||||
}
|
||||
if err := checkRouteConflict(nlh, i.AddressIPv6(), netlink.FAMILY_V6); err != nil {
|
||||
return err
|
||||
}
|
||||
ipAddr := &netlink.Addr{IPNet: i.AddressIPv6(), Label: "", Flags: syscall.IFA_F_NODAD}
|
||||
return nlh.AddrAdd(iface, ipAddr)
|
||||
}
|
||||
|
@ -442,3 +447,19 @@ func scanInterfaceStats(data, ifName string, i *types.InterfaceStatistics) error
|
|||
|
||||
return err
|
||||
}
|
||||
|
||||
func checkRouteConflict(nlh *netlink.Handle, address *net.IPNet, family int) error {
|
||||
routes, err := nlh.RouteList(nil, family)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, route := range routes {
|
||||
if route.Dst != nil {
|
||||
if route.Dst.Contains(address.IP) || address.Contains(route.Dst.IP) {
|
||||
return fmt.Errorf("cannot program address %v in sandbox interface because it conflicts with existing route %s",
|
||||
address, route)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
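checkRouteConflict above rejects an address when either the address's subnet or an existing route's destination contains the other's base IP. A self-contained illustration of that overlap test using only the standard library; the CIDRs are arbitrary example values:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Candidate sandbox address and an existing route destination.
	_, addr, _ := net.ParseCIDR("10.10.1.5/24")
	_, routeDst, _ := net.ParseCIDR("10.10.0.0/16")

	// Same test as checkRouteConflict: a conflict exists when either
	// network contains the other's base IP.
	conflict := routeDst.Contains(addr.IP) || addr.Contains(routeDst.IP)
	fmt.Printf("address %s vs route %s: conflict=%v\n", addr, routeDst, conflict)
}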
|
||||
|
|
8 vendor/github.com/docker/libnetwork/osl/namespace_linux.go (generated, vendored)
@@ -14,7 +14,7 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/docker/libnetwork/ns"
|
||||
"github.com/docker/libnetwork/types"
|
||||
|
@ -263,10 +263,10 @@ func GetSandboxForExternalKey(basePath string, key string) (Sandbox, error) {
|
|||
|
||||
func reexecCreateNamespace() {
|
||||
if len(os.Args) < 2 {
|
||||
log.Fatal("no namespace path provided")
|
||||
logrus.Fatal("no namespace path provided")
|
||||
}
|
||||
if err := mountNetworkNamespace("/proc/self/ns/net", os.Args[1]); err != nil {
|
||||
log.Fatal(err)
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -338,7 +338,7 @@ func (n *networkNamespace) InvokeFunc(f func()) error {
|
|||
func InitOSContext() func() {
|
||||
runtime.LockOSThread()
|
||||
if err := ns.SetNamespace(); err != nil {
|
||||
log.Error(err)
|
||||
logrus.Error(err)
|
||||
}
|
||||
return runtime.UnlockOSThread
|
||||
}
|
||||
|
|
24
vendor/github.com/docker/libnetwork/resolver.go
generated
vendored
24
vendor/github.com/docker/libnetwork/resolver.go
generated
vendored
|
@ -8,7 +8,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/types"
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
@ -219,7 +219,7 @@ func (r *resolver) handleIPQuery(name string, query *dns.Msg, ipType int) (*dns.
|
|||
|
||||
if addr == nil && ipv6Miss {
|
||||
// Send a reply without any Answer sections
|
||||
log.Debugf("Lookup name %s present without IPv6 address", name)
|
||||
logrus.Debugf("Lookup name %s present without IPv6 address", name)
|
||||
resp := createRespMsg(query)
|
||||
return resp, nil
|
||||
}
|
||||
|
@ -227,7 +227,7 @@ func (r *resolver) handleIPQuery(name string, query *dns.Msg, ipType int) (*dns.
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
log.Debugf("Lookup for %s: IP %v", name, addr)
|
||||
logrus.Debugf("Lookup for %s: IP %v", name, addr)
|
||||
|
||||
resp := createRespMsg(query)
|
||||
if len(addr) > 1 {
|
||||
|
@ -268,7 +268,7 @@ func (r *resolver) handlePTRQuery(ptr string, query *dns.Msg) (*dns.Msg, error)
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
log.Debugf("Lookup for IP %s: name %s", parts[0], host)
|
||||
logrus.Debugf("Lookup for IP %s: name %s", parts[0], host)
|
||||
fqdn := dns.Fqdn(host)
|
||||
|
||||
resp := new(dns.Msg)
|
||||
|
@ -352,7 +352,7 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
|
|||
}
|
||||
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
logrus.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -411,10 +411,10 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
|
|||
|
||||
execErr := r.backend.ExecFunc(extConnect)
|
||||
if execErr != nil || err != nil {
|
||||
log.Debugf("Connect failed, %s", err)
|
||||
logrus.Debugf("Connect failed, %s", err)
|
||||
continue
|
||||
}
|
||||
log.Debugf("Query %s[%d] from %s, forwarding to %s:%s", name, query.Question[0].Qtype,
|
||||
logrus.Debugf("Query %s[%d] from %s, forwarding to %s:%s", name, query.Question[0].Qtype,
|
||||
extConn.LocalAddr().String(), proto, extDNS.ipStr)
|
||||
|
||||
// Timeout has to be set for every IO operation.
|
||||
|
@ -430,7 +430,7 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
|
|||
old := r.tStamp
|
||||
r.tStamp = time.Now()
|
||||
if r.tStamp.Sub(old) > logInterval {
|
||||
log.Errorf("More than %v concurrent queries from %s", maxConcurrent, extConn.LocalAddr().String())
|
||||
logrus.Errorf("More than %v concurrent queries from %s", maxConcurrent, extConn.LocalAddr().String())
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
@ -438,7 +438,7 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
|
|||
err = co.WriteMsg(query)
|
||||
if err != nil {
|
||||
r.forwardQueryEnd()
|
||||
log.Debugf("Send to DNS server failed, %s", err)
|
||||
logrus.Debugf("Send to DNS server failed, %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -447,7 +447,7 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
|
|||
// client can retry over TCP
|
||||
if err != nil && err != dns.ErrTruncated {
|
||||
r.forwardQueryEnd()
|
||||
log.Debugf("Read from DNS server failed, %s", err)
|
||||
logrus.Debugf("Read from DNS server failed, %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -462,7 +462,7 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
|
|||
}
|
||||
|
||||
if err = w.WriteMsg(resp); err != nil {
|
||||
log.Errorf("error writing resolver resp, %s", err)
|
||||
logrus.Errorf("error writing resolver resp, %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -483,7 +483,7 @@ func (r *resolver) forwardQueryEnd() {
|
|||
defer r.queryLock.Unlock()
|
||||
|
||||
if r.count == 0 {
|
||||
log.Errorf("Invalid concurrent query count")
|
||||
logrus.Error("Invalid concurrent query count")
|
||||
} else {
|
||||
r.count--
|
||||
}
|
||||
|
|
10
vendor/github.com/docker/libnetwork/resolver_unix.go
generated
vendored
10
vendor/github.com/docker/libnetwork/resolver_unix.go
generated
vendored
|
@ -9,7 +9,7 @@ import (
|
|||
"os/exec"
|
||||
"runtime"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/docker/libnetwork/iptables"
|
||||
"github.com/vishvananda/netns"
|
||||
|
@ -31,7 +31,7 @@ func reexecSetupResolver() {
|
|||
defer runtime.UnlockOSThread()
|
||||
|
||||
if len(os.Args) < 4 {
|
||||
log.Error("invalid number of arguments..")
|
||||
logrus.Error("invalid number of arguments..")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
|
@ -46,14 +46,14 @@ func reexecSetupResolver() {
|
|||
|
||||
f, err := os.OpenFile(os.Args[1], os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
log.Errorf("failed get network namespace %q: %v", os.Args[1], err)
|
||||
logrus.Errorf("failed get network namespace %q: %v", os.Args[1], err)
|
||||
os.Exit(2)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
nsFD := f.Fd()
|
||||
if err = netns.Set(netns.NsHandle(nsFD)); err != nil {
|
||||
log.Errorf("setting into container net ns %v failed, %v", os.Args[1], err)
|
||||
logrus.Errorf("setting into container net ns %v failed, %v", os.Args[1], err)
|
||||
os.Exit(3)
|
||||
}
|
||||
|
||||
|
@ -76,7 +76,7 @@ func reexecSetupResolver() {
|
|||
|
||||
for _, rule := range rules {
|
||||
if iptables.RawCombinedOutputNative(rule...) != nil {
|
||||
log.Errorf("setting up rule failed, %v", rule)
|
||||
logrus.Errorf("setting up rule failed, %v", rule)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
34
vendor/github.com/docker/libnetwork/sandbox.go
generated
vendored
34
vendor/github.com/docker/libnetwork/sandbox.go
generated
vendored
|
@ -9,7 +9,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/etchosts"
|
||||
"github.com/docker/libnetwork/netlabel"
|
||||
"github.com/docker/libnetwork/osl"
|
||||
|
@ -213,18 +213,18 @@ func (sb *sandbox) delete(force bool) error {
|
|||
if c.isDistributedControl() {
|
||||
retain = true
|
||||
}
|
||||
log.Warnf("Failed getting network for ep %s during sandbox %s delete: %v", ep.ID(), sb.ID(), err)
|
||||
logrus.Warnf("Failed getting network for ep %s during sandbox %s delete: %v", ep.ID(), sb.ID(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !force {
|
||||
if err := ep.Leave(sb); err != nil {
|
||||
log.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err)
|
||||
logrus.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := ep.Delete(force); err != nil {
|
||||
log.Warnf("Failed deleting endpoint %s: %v\n", ep.ID(), err)
|
||||
logrus.Warnf("Failed deleting endpoint %s: %v\n", ep.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -247,7 +247,7 @@ func (sb *sandbox) delete(force bool) error {
|
|||
}
|
||||
|
||||
if err := sb.storeDelete(); err != nil {
|
||||
log.Warnf("Failed to delete sandbox %s from store: %v", sb.ID(), err)
|
||||
logrus.Warnf("Failed to delete sandbox %s from store: %v", sb.ID(), err)
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
|
@ -291,7 +291,7 @@ func (sb *sandbox) Refresh(options ...SandboxOption) error {
|
|||
// Detach from all endpoints
|
||||
for _, ep := range epList {
|
||||
if err := ep.Leave(sb); err != nil {
|
||||
log.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err)
|
||||
logrus.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -307,7 +307,7 @@ func (sb *sandbox) Refresh(options ...SandboxOption) error {
|
|||
// Re-connect to all endpoints
|
||||
for _, ep := range epList {
|
||||
if err := ep.Join(sb); err != nil {
|
||||
log.Warnf("Failed attach sandbox %s to endpoint %s: %v\n", sb.ID(), ep.ID(), err)
|
||||
logrus.Warnf("Failed attach sandbox %s to endpoint %s: %v\n", sb.ID(), ep.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -413,7 +413,7 @@ func (sb *sandbox) updateGateway(ep *endpoint) error {
|
|||
|
||||
func (sb *sandbox) ResolveIP(ip string) string {
|
||||
var svc string
|
||||
log.Debugf("IP To resolve %v", ip)
|
||||
logrus.Debugf("IP To resolve %v", ip)
|
||||
|
||||
for _, ep := range sb.getConnectedEndpoints() {
|
||||
n := ep.getNetwork()
|
||||
|
@ -434,7 +434,7 @@ func (sb *sandbox) ResolveService(name string) ([]*net.SRV, []net.IP) {
|
|||
srv := []*net.SRV{}
|
||||
ip := []net.IP{}
|
||||
|
||||
log.Debugf("Service name To resolve: %v", name)
|
||||
logrus.Debugf("Service name To resolve: %v", name)
|
||||
|
||||
// There are DNS implementaions that allow SRV queries for names not in
|
||||
// the format defined by RFC 2782. Hence specific validations checks are
|
||||
|
@ -497,7 +497,7 @@ func (sb *sandbox) ResolveName(name string, ipType int) ([]net.IP, bool) {
|
|||
// {a.b in network c.d},
|
||||
// {a in network b.c.d},
|
||||
|
||||
log.Debugf("Name To resolve: %v", name)
|
||||
logrus.Debugf("Name To resolve: %v", name)
|
||||
name = strings.TrimSuffix(name, ".")
|
||||
reqName := []string{name}
|
||||
networkName := []string{""}
|
||||
|
@ -605,7 +605,7 @@ func (sb *sandbox) resolveName(req string, networkName string, epList []*endpoin
|
|||
func (sb *sandbox) SetKey(basePath string) error {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
log.Debugf("sandbox set key processing took %s for container %s", time.Now().Sub(start), sb.ContainerID())
|
||||
logrus.Debugf("sandbox set key processing took %s for container %s", time.Now().Sub(start), sb.ContainerID())
|
||||
}()
|
||||
|
||||
if basePath == "" {
|
||||
|
@ -646,10 +646,10 @@ func (sb *sandbox) SetKey(basePath string) error {
|
|||
|
||||
if err := sb.osSbox.InvokeFunc(sb.resolver.SetupFunc(0)); err == nil {
|
||||
if err := sb.resolver.Start(); err != nil {
|
||||
log.Errorf("Resolver Start failed for container %s, %q", sb.ContainerID(), err)
|
||||
logrus.Errorf("Resolver Start failed for container %s, %q", sb.ContainerID(), err)
|
||||
}
|
||||
} else {
|
||||
log.Errorf("Resolver Setup Function failed for container %s, %q", sb.ContainerID(), err)
|
||||
logrus.Errorf("Resolver Setup Function failed for container %s, %q", sb.ContainerID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -690,7 +690,7 @@ func releaseOSSboxResources(osSbox osl.Sandbox, ep *endpoint) {
|
|||
// Only remove the interfaces owned by this endpoint from the sandbox.
|
||||
if ep.hasInterface(i.SrcName()) {
|
||||
if err := i.Remove(); err != nil {
|
||||
log.Debugf("Remove interface %s failed: %v", i.SrcName(), err)
|
||||
logrus.Debugf("Remove interface %s failed: %v", i.SrcName(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -706,7 +706,7 @@ func releaseOSSboxResources(osSbox osl.Sandbox, ep *endpoint) {
|
|||
// Remove non-interface routes.
|
||||
for _, r := range joinInfo.StaticRoutes {
|
||||
if err := osSbox.RemoveStaticRoute(r); err != nil {
|
||||
log.Debugf("Remove route failed: %v", err)
|
||||
logrus.Debugf("Remove route failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -741,7 +741,7 @@ func (sb *sandbox) restoreOslSandbox() error {
|
|||
ep.Unlock()
|
||||
|
||||
if i == nil {
|
||||
log.Errorf("error restoring endpoint %s for container %s", ep.Name(), sb.ContainerID())
|
||||
logrus.Errorf("error restoring endpoint %s for container %s", ep.Name(), sb.ContainerID())
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -876,7 +876,7 @@ func (sb *sandbox) clearNetworkResources(origEp *endpoint) error {
|
|||
if len(sb.endpoints) == 0 {
|
||||
// sb.endpoints should never be empty and this is unexpected error condition
|
||||
// We log an error message to note this down for debugging purposes.
|
||||
log.Errorf("No endpoints in sandbox while trying to remove endpoint %s", ep.Name())
|
||||
logrus.Errorf("No endpoints in sandbox while trying to remove endpoint %s", ep.Name())
|
||||
sb.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
|
14
vendor/github.com/docker/libnetwork/sandbox_dns_unix.go
generated
vendored
14
vendor/github.com/docker/libnetwork/sandbox_dns_unix.go
generated
vendored
|
@ -11,7 +11,7 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/libnetwork/etchosts"
|
||||
"github.com/docker/libnetwork/resolvconf"
|
||||
"github.com/docker/libnetwork/types"
|
||||
|
@ -40,19 +40,19 @@ func (sb *sandbox) startResolver(restore bool) {
|
|||
if !restore {
|
||||
err = sb.rebuildDNS()
|
||||
if err != nil {
|
||||
log.Errorf("Updating resolv.conf failed for container %s, %q", sb.ContainerID(), err)
|
||||
logrus.Errorf("Updating resolv.conf failed for container %s, %q", sb.ContainerID(), err)
|
||||
return
|
||||
}
|
||||
}
|
||||
sb.resolver.SetExtServers(sb.extDNS)
|
||||
|
||||
if err = sb.osSbox.InvokeFunc(sb.resolver.SetupFunc(0)); err != nil {
|
||||
log.Errorf("Resolver Setup function failed for container %s, %q", sb.ContainerID(), err)
|
||||
logrus.Errorf("Resolver Setup function failed for container %s, %q", sb.ContainerID(), err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = sb.resolver.Start(); err != nil {
|
||||
log.Errorf("Resolver Start failed for container %s, %q", sb.ContainerID(), err)
|
||||
logrus.Errorf("Resolver Start failed for container %s, %q", sb.ContainerID(), err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -125,13 +125,13 @@ func (sb *sandbox) updateHostsFile(ifaceIP string) error {
|
|||
|
||||
func (sb *sandbox) addHostsEntries(recs []etchosts.Record) {
|
||||
if err := etchosts.Add(sb.config.hostsPath, recs); err != nil {
|
||||
log.Warnf("Failed adding service host entries to the running container: %v", err)
|
||||
logrus.Warnf("Failed adding service host entries to the running container: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (sb *sandbox) deleteHostsEntries(recs []etchosts.Record) {
|
||||
if err := etchosts.Delete(sb.config.hostsPath, recs); err != nil {
|
||||
log.Warnf("Failed deleting service host entries to the running container: %v", err)
|
||||
logrus.Warnf("Failed deleting service host entries to the running container: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -261,7 +261,7 @@ func (sb *sandbox) updateDNS(ipv6Enabled bool) error {
|
|||
if currHash != "" && currHash != currRC.Hash {
|
||||
// Seems the user has changed the container resolv.conf since the last time
|
||||
// we checked so return without doing anything.
|
||||
//log.Infof("Skipping update of resolv.conf file with ipv6Enabled: %t because file was touched by user", ipv6Enabled)
|
||||
//logrus.Infof("Skipping update of resolv.conf file with ipv6Enabled: %t because file was touched by user", ipv6Enabled)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
2
vendor/github.com/docker/libnetwork/sandbox_store.go
generated
vendored
2
vendor/github.com/docker/libnetwork/sandbox_store.go
generated
vendored
|
@ -178,7 +178,7 @@ func (sb *sandbox) storeDelete() error {
|
|||
func (c *controller) sandboxCleanup(activeSandboxes map[string]interface{}) {
|
||||
store := c.getStore(datastore.LocalScope)
|
||||
if store == nil {
|
||||
logrus.Errorf("Could not find local scope store while trying to cleanup sandboxes")
|
||||
logrus.Error("Could not find local scope store while trying to cleanup sandboxes")
|
||||
return
|
||||
}
|
||||
|
||||
|
|
225
vendor/github.com/docker/libnetwork/service_common.go
generated
vendored
Normal file
225
vendor/github.com/docker/libnetwork/service_common.go
generated
vendored
Normal file
|
@ -0,0 +1,225 @@
|
|||
// +build linux windows
|
||||
|
||||
package libnetwork
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
func newService(name string, id string, ingressPorts []*PortConfig, aliases []string) *service {
|
||||
return &service{
|
||||
name: name,
|
||||
id: id,
|
||||
ingressPorts: ingressPorts,
|
||||
loadBalancers: make(map[string]*loadBalancer),
|
||||
aliases: aliases,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *controller) cleanupServiceBindings(cleanupNID string) {
|
||||
var cleanupFuncs []func()
|
||||
|
||||
c.Lock()
|
||||
services := make([]*service, 0, len(c.serviceBindings))
|
||||
for _, s := range c.serviceBindings {
|
||||
services = append(services, s)
|
||||
}
|
||||
c.Unlock()
|
||||
|
||||
for _, s := range services {
|
||||
s.Lock()
|
||||
for nid, lb := range s.loadBalancers {
|
||||
if cleanupNID != "" && nid != cleanupNID {
|
||||
continue
|
||||
}
|
||||
|
||||
for eid, ip := range lb.backEnds {
|
||||
service := s
|
||||
loadBalancer := lb
|
||||
networkID := nid
|
||||
epID := eid
|
||||
epIP := ip
|
||||
|
||||
cleanupFuncs = append(cleanupFuncs, func() {
|
||||
if err := c.rmServiceBinding(service.name, service.id, networkID, epID, loadBalancer.vip,
|
||||
service.ingressPorts, service.aliases, epIP); err != nil {
|
||||
logrus.Errorf("Failed to remove service bindings for service %s network %s endpoint %s while cleanup: %v",
|
||||
service.id, networkID, epID, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
for _, f := range cleanupFuncs {
|
||||
f()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (c *controller) addServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, aliases []string, ip net.IP) error {
|
||||
var (
|
||||
s *service
|
||||
addService bool
|
||||
)
|
||||
|
||||
n, err := c.NetworkByID(nid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
skey := serviceKey{
|
||||
id: sid,
|
||||
ports: portConfigs(ingressPorts).String(),
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
s, ok := c.serviceBindings[skey]
|
||||
if !ok {
|
||||
// Create a new service if we are seeing this service
|
||||
// for the first time.
|
||||
s = newService(name, sid, ingressPorts, aliases)
|
||||
c.serviceBindings[skey] = s
|
||||
}
|
||||
c.Unlock()
|
||||
|
||||
// Add endpoint IP to special "tasks.svc_name" so that the
|
||||
// applications have access to DNS RR.
|
||||
n.(*network).addSvcRecords("tasks."+name, ip, nil, false)
|
||||
for _, alias := range aliases {
|
||||
n.(*network).addSvcRecords("tasks."+alias, ip, nil, false)
|
||||
}
|
||||
|
||||
// Add service name to vip in DNS, if vip is valid. Otherwise resort to DNS RR
|
||||
svcIP := vip
|
||||
if len(svcIP) == 0 {
|
||||
svcIP = ip
|
||||
}
|
||||
n.(*network).addSvcRecords(name, svcIP, nil, false)
|
||||
for _, alias := range aliases {
|
||||
n.(*network).addSvcRecords(alias, svcIP, nil, false)
|
||||
}
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
lb, ok := s.loadBalancers[nid]
|
||||
if !ok {
|
||||
// Create a new load balancer if we are seeing this
|
||||
// network attachment on the service for the first
|
||||
// time.
|
||||
lb = &loadBalancer{
|
||||
vip: vip,
|
||||
fwMark: fwMarkCtr,
|
||||
backEnds: make(map[string]net.IP),
|
||||
service: s,
|
||||
}
|
||||
|
||||
fwMarkCtrMu.Lock()
|
||||
fwMarkCtr++
|
||||
fwMarkCtrMu.Unlock()
|
||||
|
||||
s.loadBalancers[nid] = lb
|
||||
|
||||
// Since we just created this load balancer make sure
|
||||
// we add a new service service in IPVS rules.
|
||||
addService = true
|
||||
|
||||
}
|
||||
|
||||
lb.backEnds[eid] = ip
|
||||
|
||||
// Add loadbalancer service and backend in all sandboxes in
|
||||
// the network only if vip is valid.
|
||||
if len(vip) != 0 {
|
||||
n.(*network).addLBBackend(ip, vip, lb.fwMark, ingressPorts, addService)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *controller) rmServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, aliases []string, ip net.IP) error {
|
||||
var rmService bool
|
||||
|
||||
n, err := c.NetworkByID(nid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
skey := serviceKey{
|
||||
id: sid,
|
||||
ports: portConfigs(ingressPorts).String(),
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
s, ok := c.serviceBindings[skey]
|
||||
if !ok {
|
||||
c.Unlock()
|
||||
return nil
|
||||
}
|
||||
c.Unlock()
|
||||
|
||||
s.Lock()
|
||||
lb, ok := s.loadBalancers[nid]
|
||||
if !ok {
|
||||
s.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
_, ok = lb.backEnds[eid]
|
||||
if !ok {
|
||||
s.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
delete(lb.backEnds, eid)
|
||||
if len(lb.backEnds) == 0 {
|
||||
// All the backends for this service have been
|
||||
// removed. Time to remove the load balancer and also
|
||||
// remove the service entry in IPVS.
|
||||
rmService = true
|
||||
|
||||
delete(s.loadBalancers, nid)
|
||||
}
|
||||
|
||||
if len(s.loadBalancers) == 0 {
|
||||
// All loadbalancers for the service removed. Time to
|
||||
// remove the service itself.
|
||||
delete(c.serviceBindings, skey)
|
||||
}
|
||||
|
||||
// Remove loadbalancer service(if needed) and backend in all
|
||||
// sandboxes in the network only if the vip is valid.
|
||||
if len(vip) != 0 {
|
||||
n.(*network).rmLBBackend(ip, vip, lb.fwMark, ingressPorts, rmService)
|
||||
}
|
||||
s.Unlock()
|
||||
|
||||
// Delete the special "tasks.svc_name" backend record.
|
||||
n.(*network).deleteSvcRecords("tasks."+name, ip, nil, false)
|
||||
for _, alias := range aliases {
|
||||
n.(*network).deleteSvcRecords("tasks."+alias, ip, nil, false)
|
||||
}
|
||||
|
||||
// If we are doing DNS RR add the endpoint IP to DNS record
|
||||
// right away.
|
||||
if len(vip) == 0 {
|
||||
n.(*network).deleteSvcRecords(name, ip, nil, false)
|
||||
for _, alias := range aliases {
|
||||
n.(*network).deleteSvcRecords(alias, ip, nil, false)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the DNS record for VIP only if we are removing the service
|
||||
if rmService && len(vip) != 0 {
|
||||
n.(*network).deleteSvcRecords(name, vip, nil, false)
|
||||
for _, alias := range aliases {
|
||||
n.(*network).deleteSvcRecords(alias, vip, nil, false)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
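cleanupServiceBindings above builds the removal closures while holding the controller and service locks and only runs them afterwards, since rmServiceBinding re-acquires those same locks. A minimal standalone sketch of that collect-then-run pattern; the registry type and names are invented for illustration:

package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu    sync.Mutex
	items map[string]string
}

// flush demonstrates the collect-then-run pattern: build the cleanup closures
// while holding the lock, release it, then invoke them, because each closure
// needs to re-acquire the same lock.
func (r *registry) flush() {
	var cleanups []func()

	r.mu.Lock()
	for k := range r.items {
		key := k
		cleanups = append(cleanups, func() { r.remove(key) })
	}
	r.mu.Unlock()

	for _, f := range cleanups {
		f()
	}
}

func (r *registry) remove(key string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.items, key)
	fmt.Println("removed", key)
}

func main() {
	r := &registry{items: map[string]string{"a": "1", "b": "2"}}
	r.flush()
}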
|
216 vendor/github.com/docker/libnetwork/service_linux.go (generated, vendored)
@@ -29,222 +29,6 @@ func init() {
|
|||
reexec.Register("redirecter", redirecter)
|
||||
}
|
||||
|
||||
func newService(name string, id string, ingressPorts []*PortConfig, aliases []string) *service {
|
||||
return &service{
|
||||
name: name,
|
||||
id: id,
|
||||
ingressPorts: ingressPorts,
|
||||
loadBalancers: make(map[string]*loadBalancer),
|
||||
aliases: aliases,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *controller) cleanupServiceBindings(cleanupNID string) {
|
||||
var cleanupFuncs []func()
|
||||
|
||||
c.Lock()
|
||||
services := make([]*service, 0, len(c.serviceBindings))
|
||||
for _, s := range c.serviceBindings {
|
||||
services = append(services, s)
|
||||
}
|
||||
c.Unlock()
|
||||
|
||||
for _, s := range services {
|
||||
s.Lock()
|
||||
for nid, lb := range s.loadBalancers {
|
||||
if cleanupNID != "" && nid != cleanupNID {
|
||||
continue
|
||||
}
|
||||
|
||||
for eid, ip := range lb.backEnds {
|
||||
service := s
|
||||
loadBalancer := lb
|
||||
networkID := nid
|
||||
epID := eid
|
||||
epIP := ip
|
||||
|
||||
cleanupFuncs = append(cleanupFuncs, func() {
|
||||
if err := c.rmServiceBinding(service.name, service.id, networkID, epID, loadBalancer.vip,
|
||||
service.ingressPorts, service.aliases, epIP); err != nil {
|
||||
logrus.Errorf("Failed to remove service bindings for service %s network %s endpoint %s while cleanup: %v",
|
||||
service.id, networkID, epID, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
for _, f := range cleanupFuncs {
|
||||
f()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (c *controller) addServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, aliases []string, ip net.IP) error {
|
||||
var (
|
||||
s *service
|
||||
addService bool
|
||||
)
|
||||
|
||||
n, err := c.NetworkByID(nid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
skey := serviceKey{
|
||||
id: sid,
|
||||
ports: portConfigs(ingressPorts).String(),
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
s, ok := c.serviceBindings[skey]
|
||||
if !ok {
|
||||
// Create a new service if we are seeing this service
|
||||
// for the first time.
|
||||
s = newService(name, sid, ingressPorts, aliases)
|
||||
c.serviceBindings[skey] = s
|
||||
}
|
||||
c.Unlock()
|
||||
|
||||
// Add endpoint IP to special "tasks.svc_name" so that the
|
||||
// applications have access to DNS RR.
|
||||
n.(*network).addSvcRecords("tasks."+name, ip, nil, false)
|
||||
for _, alias := range aliases {
|
||||
n.(*network).addSvcRecords("tasks."+alias, ip, nil, false)
|
||||
}
|
||||
|
||||
// Add service name to vip in DNS, if vip is valid. Otherwise resort to DNS RR
|
||||
svcIP := vip
|
||||
if len(svcIP) == 0 {
|
||||
svcIP = ip
|
||||
}
|
||||
n.(*network).addSvcRecords(name, svcIP, nil, false)
|
||||
for _, alias := range aliases {
|
||||
n.(*network).addSvcRecords(alias, svcIP, nil, false)
|
||||
}
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
lb, ok := s.loadBalancers[nid]
|
||||
if !ok {
|
||||
// Create a new load balancer if we are seeing this
|
||||
// network attachment on the service for the first
|
||||
// time.
|
||||
lb = &loadBalancer{
|
||||
vip: vip,
|
||||
fwMark: fwMarkCtr,
|
||||
backEnds: make(map[string]net.IP),
|
||||
service: s,
|
||||
}
|
||||
|
||||
fwMarkCtrMu.Lock()
|
||||
fwMarkCtr++
|
||||
fwMarkCtrMu.Unlock()
|
||||
|
||||
s.loadBalancers[nid] = lb
|
||||
|
||||
// Since we just created this load balancer make sure
|
||||
// we add a new service service in IPVS rules.
|
||||
addService = true
|
||||
|
||||
}
|
||||
|
||||
lb.backEnds[eid] = ip
|
||||
|
||||
// Add loadbalancer service and backend in all sandboxes in
|
||||
// the network only if vip is valid.
|
||||
if len(vip) != 0 {
|
||||
n.(*network).addLBBackend(ip, vip, lb.fwMark, ingressPorts, addService)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *controller) rmServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, aliases []string, ip net.IP) error {
|
||||
var rmService bool
|
||||
|
||||
n, err := c.NetworkByID(nid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
skey := serviceKey{
|
||||
id: sid,
|
||||
ports: portConfigs(ingressPorts).String(),
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
s, ok := c.serviceBindings[skey]
|
||||
if !ok {
|
||||
c.Unlock()
|
||||
return nil
|
||||
}
|
||||
c.Unlock()
|
||||
|
||||
s.Lock()
|
||||
lb, ok := s.loadBalancers[nid]
|
||||
if !ok {
|
||||
s.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
_, ok = lb.backEnds[eid]
|
||||
if !ok {
|
||||
s.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
delete(lb.backEnds, eid)
|
||||
if len(lb.backEnds) == 0 {
|
||||
// All the backends for this service have been
|
||||
// removed. Time to remove the load balancer and also
|
||||
// remove the service entry in IPVS.
|
||||
rmService = true
|
||||
|
||||
delete(s.loadBalancers, nid)
|
||||
}
|
||||
|
||||
if len(s.loadBalancers) == 0 {
|
||||
// All loadbalancers for the service removed. Time to
|
||||
// remove the service itself.
|
||||
delete(c.serviceBindings, skey)
|
||||
}
|
||||
|
||||
// Remove loadbalancer service(if needed) and backend in all
|
||||
// sandboxes in the network only if the vip is valid.
|
||||
if len(vip) != 0 {
|
||||
n.(*network).rmLBBackend(ip, vip, lb.fwMark, ingressPorts, rmService)
|
||||
}
|
||||
s.Unlock()
|
||||
|
||||
// Delete the special "tasks.svc_name" backend record.
|
||||
n.(*network).deleteSvcRecords("tasks."+name, ip, nil, false)
|
||||
for _, alias := range aliases {
|
||||
n.(*network).deleteSvcRecords("tasks."+alias, ip, nil, false)
|
||||
}
|
||||
|
||||
// If we are doing DNS RR add the endpoint IP to DNS record
|
||||
// right away.
|
||||
if len(vip) == 0 {
|
||||
n.(*network).deleteSvcRecords(name, ip, nil, false)
|
||||
for _, alias := range aliases {
|
||||
n.(*network).deleteSvcRecords(alias, ip, nil, false)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the DNS record for VIP only if we are removing the service
|
||||
if rmService && len(vip) != 0 {
|
||||
n.(*network).deleteSvcRecords(name, vip, nil, false)
|
||||
for _, alias := range aliases {
|
||||
n.(*network).deleteSvcRecords(alias, vip, nil, false)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get all loadbalancers on this network that is currently discovered
|
||||
// on this node.
|
||||
func (n *network) connectedLoadbalancers() []*loadBalancer {
|
||||
|
2	vendor/github.com/docker/libnetwork/service_unsupported.go generated vendored

@@ -1,4 +1,4 @@
// +build !linux
// +build !linux,!windows

package libnetwork
15	vendor/github.com/docker/libnetwork/service_windows.go generated vendored Normal file

@@ -0,0 +1,15 @@
package libnetwork

import "net"

func (n *network) addLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig, addService bool) {
}

func (n *network) rmLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig, rmService bool) {
}

func (sb *sandbox) populateLoadbalancers(ep *endpoint) {
}

func arrangeIngressFilterRule() {
}
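The new service_windows.go stubs are intentional no-ops: they give Windows builds the same load-balancer hooks the shared service code calls, while the tightened !linux,!windows constraint on service_unsupported.go above keeps Windows from compiling a second, conflicting set of the same functions. A rough standalone sketch of the idea, using a runtime check in place of the real per-GOOS files (hypothetical names, not libnetwork code):

package main

import (
	"fmt"
	"runtime"
)

// addLBBackend stands in for the vendored hook: on Windows the stub body is
// empty, so the call is a no-op; on Linux the real implementation programs
// IPVS. This sketch fakes the per-GOOS split with a runtime check instead of
// build-tagged files.
func addLBBackend() {
	if runtime.GOOS == "windows" {
		return // mirrors the empty service_windows.go stub
	}
	fmt.Println("would program the IPVS service and backend here")
}

func main() {
	addLBBackend()
}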
24	vendor/github.com/docker/libnetwork/store.go generated vendored

@@ -3,7 +3,7 @@ package libnetwork
import (
	"fmt"

	log "github.com/Sirupsen/logrus"
	"github.com/Sirupsen/logrus"
	"github.com/docker/libkv/store/boltdb"
	"github.com/docker/libkv/store/consul"
	"github.com/docker/libkv/store/etcd"

@@ -85,7 +85,7 @@ func (c *controller) getNetworkFromStore(nid string) (*network, error) {
		// Continue searching in the next store if the key is not found in this store
		if err != nil {
			if err != datastore.ErrKeyNotFound {
				log.Debugf("could not find network %s: %v", nid, err)
				logrus.Debugf("could not find network %s: %v", nid, err)
			}
			continue
		}

@@ -126,7 +126,7 @@ func (c *controller) getNetworksForScope(scope string) ([]*network, error) {
		ec := &endpointCnt{n: n}
		err = store.GetObject(datastore.Key(ec.Key()...), ec)
		if err != nil && !n.inDelete {
			log.Warnf("Could not find endpoint count key %s for network %s while listing: %v", datastore.Key(ec.Key()...), n.Name(), err)
			logrus.Warnf("Could not find endpoint count key %s for network %s while listing: %v", datastore.Key(ec.Key()...), n.Name(), err)
			continue
		}

@@ -147,7 +147,7 @@ func (c *controller) getNetworksFromStore() ([]*network, error) {
		// Continue searching in the next store if no keys found in this store
		if err != nil {
			if err != datastore.ErrKeyNotFound {
				log.Debugf("failed to get networks for scope %s: %v", store.Scope(), err)
				logrus.Debugf("failed to get networks for scope %s: %v", store.Scope(), err)
			}
			continue
		}

@@ -161,7 +161,7 @@ func (c *controller) getNetworksFromStore() ([]*network, error) {
		ec := &endpointCnt{n: n}
		err = store.GetObject(datastore.Key(ec.Key()...), ec)
		if err != nil && !n.inDelete {
			log.Warnf("could not find endpoint count key %s for network %s while listing: %v", datastore.Key(ec.Key()...), n.Name(), err)
			logrus.Warnf("could not find endpoint count key %s for network %s while listing: %v", datastore.Key(ec.Key()...), n.Name(), err)
			continue
		}

@@ -185,7 +185,7 @@ func (n *network) getEndpointFromStore(eid string) (*endpoint, error) {
		if err != nil {
			if err != datastore.ErrKeyNotFound {
				errors = append(errors, fmt.Sprintf("{%s:%v}, ", store.Scope(), err))
				log.Debugf("could not find endpoint %s in %s: %v", eid, store.Scope(), err)
				logrus.Debugf("could not find endpoint %s in %s: %v", eid, store.Scope(), err)
			}
			continue
		}

@@ -203,7 +203,7 @@ func (n *network) getEndpointsFromStore() ([]*endpoint, error) {
		// Continue searching in the next store if no keys found in this store
		if err != nil {
			if err != datastore.ErrKeyNotFound {
				log.Debugf("failed to get endpoints for network %s scope %s: %v",
				logrus.Debugf("failed to get endpoints for network %s scope %s: %v",
					n.Name(), store.Scope(), err)
			}
			continue

@@ -396,7 +396,7 @@ func (c *controller) processEndpointCreate(nmap map[string]*netWatch, ep *endpoint) {
		ch, err := store.Watch(ep.getNetwork().getEpCnt(), nw.stopCh)
		if err != nil {
			log.Warnf("Error creating watch for network: %v", err)
			logrus.Warnf("Error creating watch for network: %v", err)
			return
		}

@@ -459,15 +459,15 @@ func (c *controller) startWatch() {
func (c *controller) networkCleanup() {
	networks, err := c.getNetworksFromStore()
	if err != nil {
		log.Warnf("Could not retrieve networks from store(s) during network cleanup: %v", err)
		logrus.Warnf("Could not retrieve networks from store(s) during network cleanup: %v", err)
		return
	}

	for _, n := range networks {
		if n.inDelete {
			log.Infof("Removing stale network %s (%s)", n.Name(), n.ID())
			logrus.Infof("Removing stale network %s (%s)", n.Name(), n.ID())
			if err := n.delete(true); err != nil {
				log.Debugf("Error while removing stale network: %v", err)
				logrus.Debugf("Error while removing stale network: %v", err)
			}
		}
	}

@@ -476,7 +476,7 @@ func (c *controller) networkCleanup() {
var populateSpecial NetworkWalker = func(nw Network) bool {
	if n := nw.(*network); n.hasSpecialDriver() {
		if err := n.getController().addNetwork(n); err != nil {
			log.Warnf("Failed to populate network %q with driver %q", nw.Name(), nw.Type())
			logrus.Warnf("Failed to populate network %q with driver %q", nw.Name(), nw.Type())
		}
	}
	return false
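The store.go hunks are mechanical: the "log" import alias for Sirupsen/logrus is dropped and every call site uses the package name directly, consistent with the other vendored files in this commit. A tiny standalone sketch of the resulting call style (hypothetical messages; assumes the Sirupsen/logrus package is available in the vendor tree):

package main

// Imported without the old "log" alias; call sites then read logrus.Xxx.
import "github.com/Sirupsen/logrus"

func main() {
	nid := "3ab699ea3657"
	logrus.Warnf("Could not retrieve networks from store(s) during network cleanup: %v", "timeout")
	logrus.Infof("Removing stale network %s (%s)", "ingress", nid) // Infof/Warnf/Debugf signatures are unchanged
}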
@@ -1,3 +1,5 @@
// +build linux windows

package networkallocator

import (

@@ -1,4 +1,4 @@
// +build !linux,!darwin
// +build !linux,!darwin,!windows

package networkallocator
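These networkallocator hunks widen platform coverage using the old "+build" tag syntax, where a space means OR and a comma means AND: "linux windows" selects either OS, while "!linux,!darwin,!windows" selects targets that are none of the three. A hypothetical standalone file using the same constraint style (not part of the vendored code):

// +build linux windows

// Built only for linux or windows; on any other GOOS the file is skipped
// entirely, which is how the per-platform networkallocator sources are chosen.
package main

import (
	"fmt"
	"runtime"
)

func main() {
	fmt.Println("compiled for", runtime.GOOS)
}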