libnetwork: drop DatastoreConfig discovery type

The DatastoreConfig discovery type is unused. Remove the constant and
any resulting dead code. Today's biggest loser is the IPAM Allocator:
DatastoreConfig was the only type of discovery event it was listening
for, and there was no other place where a non-nil datastore could be
passed into the allocator. Strip out all the dead persistence code from
Allocator, leaving it as purely an in-memory implementation.

There is no longer any need to check the consistency of the allocator's
bit sequences: without persistent storage, there is nowhere for
inconsistent bit sequences to be loaded from.

Signed-off-by: Cory Snider <csnider@mirantis.com>

Author: Cory Snider
Date:   2023-01-24 18:49:24 -05:00
Commit: a08a254df3 (parent 28edc8e2d6)
10 changed files with 685 additions and 1503 deletions
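
The most visible API change is the allocator constructor: the two datastore
parameters are gone and only the predefined pools remain. A minimal sketch of
the new call, based on the NewAllocator and registerBuiltin hunks below (the
helper name is hypothetical; imports from libnetwork/ipam and
libnetwork/ipamutils are assumed):

    // newDefaultAllocator shows the post-change constructor: no datastores,
    // just the predefined pools for the two built-in address spaces.
    func newDefaultAllocator() (*ipam.Allocator, error) {
        return ipam.NewAllocator(
            ipamutils.GetLocalScopeDefaultNetworks(),  // LocalDefault pools
            ipamutils.GetGlobalScopeDefaultNetworks(), // GlobalDefault pools
        )
    }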

@@ -16,8 +16,6 @@ type DiscoveryType int
const (
// NodeDiscovery represents Node join/leave events provided by discovery
NodeDiscovery = iota + 1
// DatastoreConfig represents an add/remove datastore event
DatastoreConfig
// EncryptionKeysConfig represents the initial key(s) for performing datapath encryption
EncryptionKeysConfig
// EncryptionKeysUpdate represents an update to the datapath encryption key(s)
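
For reference, the constant block reads as follows after the removal
(reconstructed from the hunk above; because the block is iota-based, the
remaining values shift down by one):

    const (
        // NodeDiscovery represents Node join/leave events provided by discovery
        NodeDiscovery = iota + 1
        // EncryptionKeysConfig represents the initial key(s) for performing datapath encryption
        EncryptionKeysConfig
        // EncryptionKeysUpdate represents an update to the datapath encryption key(s)
        EncryptionKeysUpdate
    )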

@@ -319,7 +319,6 @@ func (d *driver) pushLocalEndpointEvent(action, nid, eid string) {
// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster
func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error {
var err error
switch dType {
case discoverapi.NodeDiscovery:
nodeData, ok := data.(discoverapi.NodeDiscoveryData)
@@ -327,18 +326,6 @@ func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{})
return fmt.Errorf("invalid discovery data")
}
d.nodeJoin(nodeData.Address, nodeData.BindAddress, nodeData.Self)
case discoverapi.DatastoreConfig:
if d.store != nil {
return types.ForbiddenErrorf("cannot accept datastore configuration: Overlay driver has a datastore configured already")
}
dsc, ok := data.(discoverapi.DatastoreConfigData)
if !ok {
return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
}
d.store, err = datastore.NewDataStoreFromConfig(dsc)
if err != nil {
return types.InternalErrorf("failed to initialize data store: %v", err)
}
case discoverapi.EncryptionKeysConfig:
encrData, ok := data.(discoverapi.DriverEncryptionConfig)
if !ok {
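
With the DatastoreConfig case gone, DiscoverNew in the overlay driver only has
to handle node and encryption-key events. A rough sketch of driving the node
case that remains (the helper name and address are hypothetical; the field
names are taken from the hunk above):

    // notifyNodeJoin announces a joining node to any component implementing
    // discoverapi.Discover, such as the overlay driver shown above.
    func notifyNodeJoin(d discoverapi.Discover, addr string) error {
        return d.DiscoverNew(discoverapi.NodeDiscovery, discoverapi.NodeDiscoveryData{
            Address:     addr,
            BindAddress: addr,
            Self:        false,
        })
    }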

@@ -7,8 +7,6 @@ import (
"sync"
"github.com/docker/docker/libnetwork/bitseq"
"github.com/docker/docker/libnetwork/datastore"
"github.com/docker/docker/libnetwork/discoverapi"
"github.com/docker/docker/libnetwork/ipamapi"
"github.com/docker/docker/libnetwork/types"
"github.com/sirupsen/logrus"
@@ -17,9 +15,6 @@ import (
const (
localAddressSpace = "LocalDefault"
globalAddressSpace = "GlobalDefault"
// datastore keyes for ipam objects
dsConfigKey = "ipam/" + ipamapi.DefaultIPAM + "/config"
dsDataKey = "ipam/" + ipamapi.DefaultIPAM + "/data"
)
// Allocator provides per address space ipv4/ipv6 book keeping
@@ -28,168 +23,37 @@ type Allocator struct {
// Separate from the addrSpace because they should not be serialized
predefined map[string][]*net.IPNet
predefinedStartIndices map[string]int
// The (potentially serialized) address spaces
// The address spaces
addrSpaces map[string]*addrSpace
// stores []datastore.Datastore
// Allocated addresses in each address space's subnet
addresses map[SubnetKey]*bitseq.Handle
sync.Mutex
}
// NewAllocator returns an instance of libnetwork ipam
func NewAllocator(lcDs, glDs datastore.DataStore, lcAs, glAs []*net.IPNet) (*Allocator, error) {
a := &Allocator{}
// Load predefined subnet pools
a.predefined = map[string][]*net.IPNet{
localAddressSpace: lcAs,
globalAddressSpace: glAs,
func NewAllocator(lcAs, glAs []*net.IPNet) (*Allocator, error) {
a := &Allocator{
predefined: map[string][]*net.IPNet{
localAddressSpace: lcAs,
globalAddressSpace: glAs,
},
predefinedStartIndices: map[string]int{},
addresses: map[SubnetKey]*bitseq.Handle{},
}
// Initialize asIndices map
a.predefinedStartIndices = make(map[string]int)
// Initialize bitseq map
a.addresses = make(map[SubnetKey]*bitseq.Handle)
// Initialize address spaces
a.addrSpaces = make(map[string]*addrSpace)
for _, aspc := range []struct {
as string
ds datastore.DataStore
}{
{localAddressSpace, lcDs},
{globalAddressSpace, glDs},
} {
a.initializeAddressSpace(aspc.as, aspc.ds)
a.addrSpaces = map[string]*addrSpace{
localAddressSpace: a.newAddressSpace(),
globalAddressSpace: a.newAddressSpace(),
}
return a, nil
}
func (a *Allocator) refresh(as string) error {
aSpace, err := a.getAddressSpaceFromStore(as)
if err != nil {
return types.InternalErrorf("error getting pools config from store: %v", err)
}
if aSpace == nil {
return nil
}
a.Lock()
a.addrSpaces[as] = aSpace
a.Unlock()
return nil
}
func (a *Allocator) updateBitMasks(aSpace *addrSpace) error {
var inserterList []func() error
aSpace.Lock()
for k, v := range aSpace.subnets {
if v.Range == nil {
kk := k
vv := v
inserterList = append(inserterList, func() error { return a.insertBitMask(kk, vv.Pool) })
}
}
aSpace.Unlock()
// Add the bitmasks (data could come from datastore)
for _, f := range inserterList {
if err := f(); err != nil {
return err
}
}
return nil
}
// Checks for and fixes damaged bitmask.
func (a *Allocator) checkConsistency(as string) {
var sKeyList []SubnetKey
// Retrieve this address space's configuration and bitmasks from the datastore
a.refresh(as)
a.Lock()
aSpace, ok := a.addrSpaces[as]
a.Unlock()
if !ok {
return
}
a.updateBitMasks(aSpace)
aSpace.Lock()
for sk, pd := range aSpace.subnets {
if pd.Range != nil {
continue
}
sKeyList = append(sKeyList, sk)
}
aSpace.Unlock()
for _, sk := range sKeyList {
a.Lock()
bm := a.addresses[sk]
a.Unlock()
if err := bm.CheckConsistency(); err != nil {
logrus.Warnf("Error while running consistency check for %s: %v", sk, err)
}
}
}
func (a *Allocator) initializeAddressSpace(as string, ds datastore.DataStore) error {
scope := ""
if ds != nil {
scope = ds.Scope()
}
a.Lock()
if currAS, ok := a.addrSpaces[as]; ok {
if currAS.ds != nil {
a.Unlock()
return types.ForbiddenErrorf("a datastore is already configured for the address space %s", as)
}
}
a.addrSpaces[as] = &addrSpace{
func (a *Allocator) newAddressSpace() *addrSpace {
return &addrSpace{
subnets: map[SubnetKey]*PoolData{},
id: dsConfigKey + "/" + as,
scope: scope,
ds: ds,
alloc: a,
}
a.Unlock()
a.checkConsistency(as)
return nil
}
// DiscoverNew informs the allocator about a new global scope datastore
func (a *Allocator) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error {
if dType != discoverapi.DatastoreConfig {
return nil
}
dsc, ok := data.(discoverapi.DatastoreConfigData)
if !ok {
return types.InternalErrorf("incorrect data in datastore update notification: %v", data)
}
ds, err := datastore.NewDataStoreFromConfig(dsc)
if err != nil {
return err
}
return a.initializeAddressSpace(globalAddressSpace, ds)
}
// DiscoverDelete is a notification of no interest for the allocator
func (a *Allocator) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error {
return nil
}
// GetDefaultAddressSpaces returns the local and global default address spaces
@@ -220,10 +84,6 @@ retry:
k = &SubnetKey{AddressSpace: addressSpace, Subnet: nw.String()}
}
if err := a.refresh(addressSpace); err != nil {
return "", nil, nil, err
}
aSpace, err := a.getAddrSpace(addressSpace)
if err != nil {
return "", nil, nil, err
@@ -238,14 +98,6 @@ retry:
return "", nil, nil, err
}
if err := a.writeToStore(aSpace); err != nil {
if _, ok := err.(types.RetryError); !ok {
return "", nil, nil, types.InternalErrorf("pool configuration failed because of %s", err.Error())
}
goto retry
}
return k.String(), nw, nil, insert()
}
@@ -257,11 +109,6 @@ func (a *Allocator) ReleasePool(poolID string) error {
return types.BadRequestErrorf("invalid pool id: %s", poolID)
}
retry:
if err := a.refresh(k.AddressSpace); err != nil {
return err
}
aSpace, err := a.getAddrSpace(k.AddressSpace)
if err != nil {
return err
@@ -272,13 +119,6 @@ retry:
return err
}
if err = a.writeToStore(aSpace); err != nil {
if _, ok := err.(types.RetryError); !ok {
return types.InternalErrorf("pool (%s) removal failed because of %v", poolID, err)
}
goto retry
}
return remove()
}
@@ -289,7 +129,7 @@ func (a *Allocator) getAddrSpace(as string) (*addrSpace, error) {
defer a.Unlock()
aSpace, ok := a.addrSpaces[as]
if !ok {
return nil, types.BadRequestErrorf("cannot find address space %s (most likely the backing datastore is not configured)", as)
return nil, types.BadRequestErrorf("cannot find address space %s", as)
}
return aSpace, nil
}
@@ -331,7 +171,6 @@ func (a *Allocator) parsePoolRequest(addressSpace, pool, subPool string, v6 bool
func (a *Allocator) insertBitMask(key SubnetKey, pool *net.IPNet) error {
//logrus.Debugf("Inserting bitmask (%s, %s)", key.String(), pool.String())
store := a.getStore(key.AddressSpace)
ipVer := getAddressVersion(pool.IP)
ones, bits := pool.Mask.Size()
numAddresses := uint64(1 << uint(bits-ones))
@@ -341,8 +180,8 @@ func (a *Allocator) insertBitMask(key SubnetKey, pool *net.IPNet) error {
numAddresses--
}
// Generate the new address masks. AddressMask content may come from datastore
h, err := bitseq.NewHandle(dsDataKey, store, key.String(), numAddresses)
// Generate the new address masks.
h, err := bitseq.NewHandle("", nil, "", numAddresses)
if err != nil {
return err
}
@@ -372,7 +211,7 @@ func (a *Allocator) retrieveBitmask(k SubnetKey, n *net.IPNet) (*bitseq.Handle,
if !ok {
logrus.Debugf("Retrieving bitmask (%s, %s)", k.String(), n.String())
if err := a.insertBitMask(k, n); err != nil {
return nil, types.InternalErrorf("could not find bitmask in datastore for %s", k.String())
return nil, types.InternalErrorf("could not find bitmask for %s", k.String())
}
a.Lock()
bm = a.addresses[k]
@@ -452,10 +291,6 @@ func (a *Allocator) RequestAddress(poolID string, prefAddress net.IP, opts map[s
return nil, nil, types.BadRequestErrorf("invalid pool id: %s", poolID)
}
if err := a.refresh(k.AddressSpace); err != nil {
return nil, nil, err
}
aSpace, err := a.getAddrSpace(k.AddressSpace)
if err != nil {
return nil, nil, err
@@ -482,7 +317,7 @@ func (a *Allocator) RequestAddress(poolID string, prefAddress net.IP, opts map[s
bm, err := a.retrieveBitmask(k, c.Pool)
if err != nil {
return nil, nil, types.InternalErrorf("could not find bitmask in datastore for %s on address %v request from pool %s: %v",
return nil, nil, types.InternalErrorf("could not find bitmask for %s on address %v request from pool %s: %v",
k.String(), prefAddress, poolID, err)
}
// In order to request for a serial ip address allocation, callers can pass in the option to request
@@ -509,10 +344,6 @@ func (a *Allocator) ReleaseAddress(poolID string, address net.IP) error {
return types.BadRequestErrorf("invalid pool id: %s", poolID)
}
if err := a.refresh(k.AddressSpace); err != nil {
return err
}
aSpace, err := a.getAddrSpace(k.AddressSpace)
if err != nil {
return err
@@ -551,7 +382,7 @@ func (a *Allocator) ReleaseAddress(poolID string, address net.IP) error {
bm, err := a.retrieveBitmask(k, c.Pool)
if err != nil {
return types.InternalErrorf("could not find bitmask in datastore for %s on address %v release from pool %s: %v",
return types.InternalErrorf("could not find bitmask for %s on address %v release from pool %s: %v",
k.String(), address, poolID, err)
}
defer logrus.Debugf("Released address PoolID:%s, Address:%v Sequence:%s", poolID, address, bm.String())
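
The serial-allocation comment kept in the RequestAddress hunk above is
untouched by this commit; a sketch of how a caller opts into it, assuming the
option key is ipamapi.AllocSerialPrefix (that constant is not part of this
diff, and the helper name is hypothetical):

    // requestSerial is a hypothetical helper: ask the allocator to hand out
    // addresses serially rather than reusing the first available one.
    func requestSerial(a *ipam.Allocator, poolID string) (*net.IPNet, error) {
        opts := map[string]string{ipamapi.AllocSerialPrefix: "true"}
        ip, _, err := a.RequestAddress(poolID, nil, opts)
        return ip, err
    }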

File diff suppressed because it is too large.

@@ -12,6 +12,7 @@ import (
"time"
"github.com/docker/docker/libnetwork/ipamapi"
"github.com/docker/docker/libnetwork/ipamutils"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
"gotest.tools/v3/assert"
@@ -36,15 +37,12 @@ type testContext struct {
}
func newTestContext(t *testing.T, mask int, options map[string]string) *testContext {
a, err := getAllocator(false)
a, err := NewAllocator(ipamutils.GetLocalScopeDefaultNetworks(), ipamutils.GetGlobalScopeDefaultNetworks())
if err != nil {
t.Fatal(err)
}
a.addrSpaces["giallo"] = &addrSpace{
id: dsConfigKey + "/" + "giallo",
ds: a.addrSpaces[localAddressSpace].ds,
alloc: a.addrSpaces[localAddressSpace].alloc,
scope: a.addrSpaces[localAddressSpace].scope,
subnets: map[SubnetKey]*PoolData{},
}
@@ -84,7 +82,7 @@ func (o *op) String() string {
}
func TestRequestPoolParallel(t *testing.T) {
a, err := getAllocator(false)
a, err := NewAllocator(ipamutils.GetLocalScopeDefaultNetworks(), ipamutils.GetGlobalScopeDefaultNetworks())
if err != nil {
t.Fatal(err)
}

@@ -1,125 +0,0 @@
package ipam
import (
"encoding/json"
"github.com/docker/docker/libnetwork/datastore"
"github.com/docker/docker/libnetwork/types"
"github.com/sirupsen/logrus"
)
// Key provides the Key to be used in KV Store
func (aSpace *addrSpace) Key() []string {
aSpace.Lock()
defer aSpace.Unlock()
return []string{aSpace.id}
}
// KeyPrefix returns the immediate parent key that can be used for tree walk
func (aSpace *addrSpace) KeyPrefix() []string {
aSpace.Lock()
defer aSpace.Unlock()
return []string{dsConfigKey}
}
// Value marshals the data to be stored in the KV store
func (aSpace *addrSpace) Value() []byte {
b, err := json.Marshal(aSpace)
if err != nil {
logrus.Warnf("Failed to marshal ipam configured pools: %v", err)
return nil
}
return b
}
// SetValue unmarshalls the data from the KV store.
func (aSpace *addrSpace) SetValue(value []byte) error {
rc := &addrSpace{subnets: make(map[SubnetKey]*PoolData)}
if err := json.Unmarshal(value, rc); err != nil {
return err
}
aSpace.subnets = rc.subnets
return nil
}
// Index returns the latest DB Index as seen by this object
func (aSpace *addrSpace) Index() uint64 {
aSpace.Lock()
defer aSpace.Unlock()
return aSpace.dbIndex
}
// SetIndex method allows the datastore to store the latest DB Index into this object
func (aSpace *addrSpace) SetIndex(index uint64) {
aSpace.Lock()
aSpace.dbIndex = index
aSpace.dbExists = true
aSpace.Unlock()
}
// Exists method is true if this object has been stored in the DB.
func (aSpace *addrSpace) Exists() bool {
aSpace.Lock()
defer aSpace.Unlock()
return aSpace.dbExists
}
// Skip provides a way for a KV Object to avoid persisting it in the KV Store
func (aSpace *addrSpace) Skip() bool {
return false
}
func (a *Allocator) getStore(as string) datastore.DataStore {
a.Lock()
defer a.Unlock()
if aSpace, ok := a.addrSpaces[as]; ok {
return aSpace.ds
}
return nil
}
func (a *Allocator) getAddressSpaceFromStore(as string) (*addrSpace, error) {
store := a.getStore(as)
// IPAM may not have a valid store. In such cases it is just in-memory state.
if store == nil {
return nil, nil
}
pc := &addrSpace{id: dsConfigKey + "/" + as, ds: store, alloc: a}
if err := store.GetObject(datastore.Key(pc.Key()...), pc); err != nil {
if err == datastore.ErrKeyNotFound {
return nil, nil
}
return nil, types.InternalErrorf("could not get pools config from store: %v", err)
}
return pc, nil
}
func (a *Allocator) writeToStore(aSpace *addrSpace) error {
store := aSpace.store()
// IPAM may not have a valid store. In such cases it is just in-memory state.
if store == nil {
return nil
}
err := store.PutObjectAtomic(aSpace)
if err == datastore.ErrKeyModified {
return types.RetryErrorf("failed to perform atomic write (%v). retry might fix the error", err)
}
return err
}
// DataScope method returns the storage scope of the datastore
func (aSpace *addrSpace) DataScope() string {
aSpace.Lock()
defer aSpace.Unlock()
return aSpace.scope
}

@@ -1,13 +1,11 @@
package ipam
import (
"encoding/json"
"fmt"
"net"
"strings"
"sync"
"github.com/docker/docker/libnetwork/datastore"
"github.com/docker/docker/libnetwork/ipamapi"
"github.com/docker/docker/libnetwork/types"
)
@@ -29,13 +27,8 @@ type PoolData struct {
// addrSpace contains the pool configurations for the address space
type addrSpace struct {
subnets map[SubnetKey]*PoolData
dbIndex uint64
dbExists bool
id string
scope string
ds datastore.DataStore
alloc *Allocator
subnets map[SubnetKey]*PoolData
alloc *Allocator
sync.Mutex
}
@@ -51,31 +44,6 @@ func (r *AddressRange) String() string {
return fmt.Sprintf("Sub: %s, range [%d, %d]", r.Sub, r.Start, r.End)
}
// MarshalJSON returns the JSON encoding of the Range object
func (r *AddressRange) MarshalJSON() ([]byte, error) {
m := map[string]interface{}{
"Sub": r.Sub.String(),
"Start": r.Start,
"End": r.End,
}
return json.Marshal(m)
}
// UnmarshalJSON decodes data into the Range object
func (r *AddressRange) UnmarshalJSON(data []byte) error {
m := map[string]interface{}{}
err := json.Unmarshal(data, &m)
if err != nil {
return err
}
if r.Sub, err = types.ParseCIDR(m["Sub"].(string)); err != nil {
return err
}
r.Start = uint64(m["Start"].(float64))
r.End = uint64(m["End"].(float64))
return nil
}
// String returns the string form of the SubnetKey object
func (s *SubnetKey) String() string {
k := fmt.Sprintf("%s/%s", s.AddressSpace, s.Subnet)
@@ -110,153 +78,6 @@ func (p *PoolData) String() string {
p.ParentKey.String(), p.Pool.String(), p.Range, p.RefCount)
}
// MarshalJSON returns the JSON encoding of the PoolData object
func (p *PoolData) MarshalJSON() ([]byte, error) {
m := map[string]interface{}{
"ParentKey": p.ParentKey,
"RefCount": p.RefCount,
}
if p.Pool != nil {
m["Pool"] = p.Pool.String()
}
if p.Range != nil {
m["Range"] = p.Range
}
return json.Marshal(m)
}
// UnmarshalJSON decodes data into the PoolData object
func (p *PoolData) UnmarshalJSON(data []byte) error {
var (
err error
t struct {
ParentKey SubnetKey
Pool string
Range *AddressRange `json:",omitempty"`
RefCount int
}
)
if err = json.Unmarshal(data, &t); err != nil {
return err
}
p.ParentKey = t.ParentKey
p.Range = t.Range
p.RefCount = t.RefCount
if t.Pool != "" {
if p.Pool, err = types.ParseCIDR(t.Pool); err != nil {
return err
}
}
return nil
}
// MarshalJSON returns the JSON encoding of the addrSpace object
func (aSpace *addrSpace) MarshalJSON() ([]byte, error) {
aSpace.Lock()
defer aSpace.Unlock()
m := map[string]interface{}{
"Scope": aSpace.scope,
}
if aSpace.subnets != nil {
s := map[string]*PoolData{}
for k, v := range aSpace.subnets {
s[k.String()] = v
}
m["Subnets"] = s
}
return json.Marshal(m)
}
// UnmarshalJSON decodes data into the addrSpace object
func (aSpace *addrSpace) UnmarshalJSON(data []byte) error {
aSpace.Lock()
defer aSpace.Unlock()
m := map[string]interface{}{}
err := json.Unmarshal(data, &m)
if err != nil {
return err
}
aSpace.scope = datastore.LocalScope
s := m["Scope"].(string)
if s == datastore.GlobalScope {
aSpace.scope = datastore.GlobalScope
}
if v, ok := m["Subnets"]; ok {
sb, _ := json.Marshal(v)
var s map[string]*PoolData
err := json.Unmarshal(sb, &s)
if err != nil {
return err
}
for ks, v := range s {
k := SubnetKey{}
k.FromString(ks)
aSpace.subnets[k] = v
}
}
return nil
}
// CopyTo deep copies the pool data to the destination pooldata
func (p *PoolData) CopyTo(dstP *PoolData) error {
dstP.ParentKey = p.ParentKey
dstP.Pool = types.GetIPNetCopy(p.Pool)
if p.Range != nil {
dstP.Range = &AddressRange{}
dstP.Range.Sub = types.GetIPNetCopy(p.Range.Sub)
dstP.Range.Start = p.Range.Start
dstP.Range.End = p.Range.End
}
dstP.RefCount = p.RefCount
return nil
}
func (aSpace *addrSpace) CopyTo(o datastore.KVObject) error {
aSpace.Lock()
defer aSpace.Unlock()
dstAspace := o.(*addrSpace)
dstAspace.id = aSpace.id
dstAspace.ds = aSpace.ds
dstAspace.alloc = aSpace.alloc
dstAspace.scope = aSpace.scope
dstAspace.dbIndex = aSpace.dbIndex
dstAspace.dbExists = aSpace.dbExists
dstAspace.subnets = make(map[SubnetKey]*PoolData)
for k, v := range aSpace.subnets {
dstAspace.subnets[k] = &PoolData{}
v.CopyTo(dstAspace.subnets[k])
}
return nil
}
func (aSpace *addrSpace) New() datastore.KVObject {
aSpace.Lock()
defer aSpace.Unlock()
return &addrSpace{
id: aSpace.id,
ds: aSpace.ds,
alloc: aSpace.alloc,
scope: aSpace.scope,
}
}
// updatePoolDBOnAdd returns a closure which will add the subnet k to the address space when executed.
func (aSpace *addrSpace) updatePoolDBOnAdd(k SubnetKey, nw *net.IPNet, ipr *AddressRange, pdf bool) (func() error, error) {
aSpace.Lock()
@@ -355,10 +176,3 @@ func (aSpace *addrSpace) contains(space string, nw *net.IPNet) bool {
}
return false
}
func (aSpace *addrSpace) store() datastore.DataStore {
aSpace.Lock()
defer aSpace.Unlock()
return aSpace.ds
}

@@ -4,7 +4,6 @@ package ipamapi
import (
"net"
"github.com/docker/docker/libnetwork/discoverapi"
"github.com/docker/docker/libnetwork/types"
"github.com/docker/docker/pkg/plugingetter"
)
@@ -59,8 +58,6 @@ var (
// Ipam represents the interface the IPAM service plugins must implement
// in order to allow injection/modification of IPAM database.
type Ipam interface {
discoverapi.Discover
// GetDefaultAddressSpaces returns the default local and global address spaces for this ipam
GetDefaultAddressSpaces() (string, string, error)
// RequestPool returns an address pool along with its unique id. Address space is a mandatory field

@@ -22,7 +22,7 @@ func registerBuiltin(ic ipamapi.Registerer) error {
localAddressPool = ipamutils.GetLocalScopeDefaultNetworks()
}
a, err := ipam.NewAllocator(nil, nil, localAddressPool, ipamutils.GetGlobalScopeDefaultNetworks())
a, err := ipam.NewAllocator(localAddressPool, ipamutils.GetGlobalScopeDefaultNetworks())
if err != nil {
return err
}

@@ -3,7 +3,6 @@ package windowsipam
import (
"net"
"github.com/docker/docker/libnetwork/discoverapi"
"github.com/docker/docker/libnetwork/ipamapi"
"github.com/docker/docker/libnetwork/types"
"github.com/sirupsen/logrus"
@@ -85,16 +84,6 @@ func (a *allocator) ReleaseAddress(poolID string, address net.IP) error {
return nil
}
// DiscoverNew informs the allocator about a new global scope datastore
func (a *allocator) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error {
return nil
}
// DiscoverDelete is a notification of no interest for the allocator
func (a *allocator) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error {
return nil
}
func (a *allocator) IsBuiltIn() bool {
return true
}