Merge pull request #2177 from ctelfer/subnet-cursor

Avoid reusing subnets when allocating from pools
Flavio Crisciani 7 years ago
parent commit 828a6d788e
2 changed files with 72 additions and 9 deletions:
  1. libnetwork/ipam/allocator.go (+33 -9)
  2. libnetwork/ipam/allocator_test.go (+39 -0)

libnetwork/ipam/allocator.go (+33 -9)

@@ -29,7 +29,10 @@ const (
 // Allocator provides per address space ipv4/ipv6 book keeping
 type Allocator struct {
 	// Predefined pools for default address spaces
-	predefined map[string][]*net.IPNet
+	// Separate from the addrSpace because they should not be serialized
+	predefined             map[string][]*net.IPNet
+	predefinedStartIndices map[string]int
+	// The (potentially serialized) address spaces
 	addrSpaces map[string]*addrSpace
 	// stores        []datastore.Datastore
 	// Allocated addresses in each address space's subnet
@@ -47,6 +50,9 @@ func NewAllocator(lcDs, glDs datastore.DataStore) (*Allocator, error) {
 		globalAddressSpace: ipamutils.PredefinedGranularNetworks,
 	}
 
+	// Initialize the predefinedStartIndices map
+	a.predefinedStartIndices = make(map[string]int)
+
 	// Initialize bitseq map
 	a.addresses = make(map[SubnetKey]*bitseq.Handle)
 
@@ -374,11 +380,24 @@ func (a *Allocator) retrieveBitmask(k SubnetKey, n *net.IPNet) (*bitseq.Handle,
 func (a *Allocator) getPredefineds(as string) []*net.IPNet {
 	a.Lock()
 	defer a.Unlock()
-	l := make([]*net.IPNet, 0, len(a.predefined[as]))
-	for _, pool := range a.predefined[as] {
-		l = append(l, pool)
+
+	p := a.predefined[as]
+	i := a.predefinedStartIndices[as]
+	// defensive in case the list changed since last update
+	if i >= len(p) {
+		i = 0
 	}
-	return l
+	return append(p[i:], p[:i]...)
+}
+
+func (a *Allocator) updateStartIndex(as string, amt int) {
+	a.Lock()
+	i := a.predefinedStartIndices[as] + amt
+	if i < 0 || i >= len(a.predefined[as]) {
+		i = 0
+	}
+	a.predefinedStartIndices[as] = i
+	a.Unlock()
 }
 
 func (a *Allocator) getPredefinedPool(as string, ipV6 bool) (*net.IPNet, error) {
@@ -397,21 +416,26 @@ func (a *Allocator) getPredefinedPool(as string, ipV6 bool) (*net.IPNet, error)
 		return nil, err
 	}
 
-	for _, nw := range a.getPredefineds(as) {
+	predefined := a.getPredefineds(as)
+
+	aSpace.Lock()
+	for i, nw := range predefined {
 		if v != getAddressVersion(nw.IP) {
 			continue
 		}
-		aSpace.Lock()
+		// Checks whether pool has already been allocated
 		if _, ok := aSpace.subnets[SubnetKey{AddressSpace: as, Subnet: nw.String()}]; ok {
-			aSpace.Unlock()
 			continue
 		}
+		// Shouldn't be necessary, but check prevents IP collisions should
+		// predefined pools overlap for any reason.
 		if !aSpace.contains(as, nw) {
 			aSpace.Unlock()
+			a.updateStartIndex(as, i+1)
 			return nw, nil
 		}
-		aSpace.Unlock()
 	}
+	aSpace.Unlock()
 
 	return nil, types.NotFoundErrorf("could not find an available, non-overlapping IPv%d address pool among the defaults to assign to the network", v)
 }
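
The heart of the change is a per-address-space cursor: getPredefineds now returns the predefined pool list rotated so iteration starts just past the most recently allocated pool, and updateStartIndex advances the cursor once a pool is handed out. Below is a minimal standalone sketch of that round-robin idea; the cursoredPools type and its methods are illustrative stand-ins, not the libnetwork API. The sketch rotates into a fresh slice rather than using append(p[i:], p[:i]...) (see the note after the test diff).

package main

import "fmt"

// cursoredPools is a hypothetical stand-in for the allocator's
// predefined pool list plus its per-address-space start index.
type cursoredPools struct {
	pools []string // pool CIDRs, e.g. "10.0.0.0/24"
	start int      // index of the next pool to try first
}

// rotated returns the pools beginning at the cursor, wrapping around.
// Copying into a fresh slice avoids aliasing the shared list.
func (c *cursoredPools) rotated() []string {
	i := c.start
	if i >= len(c.pools) { // defensive, as in the patch
		i = 0
	}
	out := make([]string, 0, len(c.pools))
	out = append(out, c.pools[i:]...)
	return append(out, c.pools[:i]...)
}

// advance moves the cursor just past the pool at rotated-index i,
// mirroring a.updateStartIndex(as, i+1) in the patch.
func (c *cursoredPools) advance(i int) {
	n := c.start + i + 1
	if n >= len(c.pools) {
		n = 0
	}
	c.start = n
}

func main() {
	c := &cursoredPools{pools: []string{"10.0.0.0/24", "10.0.1.0/24", "10.0.2.0/24"}}
	for range c.pools {
		nw := c.rotated()[0] // pretend the first candidate is always free
		c.advance(0)
		fmt.Println("allocated", nw) // cycles through all three pools
	}
}

Allocate-release-allocate cycles now walk the whole predefined list before any subnet repeats, which is exactly what the new test below verifies.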

libnetwork/ipam/allocator_test.go (+39 -0)

@@ -555,6 +555,45 @@ func TestGetSameAddress(t *testing.T) {
 	}
 }
 
+func TestPoolAllocationReuse(t *testing.T) {
+	for _, store := range []bool{false, true} {
+		a, err := getAllocator(store)
+		assert.NoError(t, err)
+
+		// First get all pools until they are exhausted
+		pList := []string{}
+		pool, _, _, err := a.RequestPool(localAddressSpace, "", "", nil, false)
+		for err == nil {
+			pList = append(pList, pool)
+			pool, _, _, err = a.RequestPool(localAddressSpace, "", "", nil, false)
+		}
+		nPools := len(pList)
+		for _, pool := range pList {
+			if err := a.ReleasePool(pool); err != nil {
+				t.Fatal(err)
+			}
+		}
+
+		// Now try to allocate then free nPool pools sequentially.
+		// Verify that we don't see any repeat networks even though
+		// we have freed them.
+		seen := map[string]bool{}
+		for i := 0; i < nPools; i++ {
+			pool, nw, _, err := a.RequestPool(localAddressSpace, "", "", nil, false)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if _, ok := seen[nw.String()]; ok {
+			t.Fatalf("Network %s was reused before exhausting the pool list", nw.String())
+			}
+			seen[nw.String()] = true
+			if err := a.ReleasePool(pool); err != nil {
+				t.Fatal(err)
+			}
+		}
+	}
+}
+
 func TestGetAddressSubPoolEqualPool(t *testing.T) {
 	for _, store := range []bool{false, true} {
 		a, err := getAllocator(store)
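
One subtlety worth flagging in the new getPredefineds: append(p[i:], p[:i]...) rotates in place when the backing array of p has spare capacity, so the returned slice can alias a.predefined[as]. Whether that bites here depends on how the predefined lists are constructed; the self-contained snippet below (not libnetwork code) just shows the mechanics. A copy-based rotation, as in the earlier sketch, sidesteps the question.

package main

import "fmt"

func main() {
	// Backing array has capacity 4 but length 3, so p[1:] can
	// grow in place when appended to.
	p := make([]int, 3, 4)
	copy(p, []int{1, 2, 3})

	r := append(p[1:], p[:1]...) // rotate left by one
	fmt.Println(r)               // [2 3 1]

	// r still shares p's backing array: writes through r are
	// visible through p as well.
	r[0] = 99
	fmt.Println(p) // [1 99 3]
}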