Bläddra i källkod

Merge pull request #3790 from crosbymichael/move-port-allocators

Move port allocators into network driver
Victor Vieux 11 år sedan
förälder
incheckning
f4b75e06bc

+ 22 - 135
network.go

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"github.com/dotcloud/docker/networkdriver"
 	"github.com/dotcloud/docker/networkdriver/ipallocator"
+	"github.com/dotcloud/docker/networkdriver/portallocator"
 	"github.com/dotcloud/docker/pkg/iptables"
 	"github.com/dotcloud/docker/pkg/netlink"
 	"github.com/dotcloud/docker/proxy"
@@ -11,7 +12,6 @@ import (
 	"log"
 	"net"
 	"strconv"
-	"sync"
 	"syscall"
 	"unsafe"
 )
@@ -20,8 +20,6 @@ const (
 	DefaultNetworkBridge = "docker0"
 	DisableNetworkBridge = "none"
 	DefaultNetworkMtu    = 1500
-	portRangeStart       = 49153
-	portRangeEnd         = 65535
 	siocBRADDBR          = 0x89a0
 )
 
@@ -283,76 +281,6 @@ func newPortMapper(config *DaemonConfig) (*PortMapper, error) {
 	return mapper, nil
 }
 
-// Port allocator: Automatically allocate and release networking ports
-type PortAllocator struct {
-	sync.Mutex
-	inUse    map[string]struct{}
-	fountain chan int
-	quit     chan bool
-}
-
-func (alloc *PortAllocator) runFountain() {
-	for {
-		for port := portRangeStart; port < portRangeEnd; port++ {
-			select {
-			case alloc.fountain <- port:
-			case quit := <-alloc.quit:
-				if quit {
-					return
-				}
-			}
-		}
-	}
-}
-
-// FIXME: Release can no longer fail, change its prototype to reflect that.
-func (alloc *PortAllocator) Release(addr net.IP, port int) error {
-	mapKey := (&net.TCPAddr{Port: port, IP: addr}).String()
-	utils.Debugf("Releasing %d", port)
-	alloc.Lock()
-	delete(alloc.inUse, mapKey)
-	alloc.Unlock()
-	return nil
-}
-
-func (alloc *PortAllocator) Acquire(addr net.IP, port int) (int, error) {
-	mapKey := (&net.TCPAddr{Port: port, IP: addr}).String()
-	utils.Debugf("Acquiring %s", mapKey)
-	if port == 0 {
-		// Allocate a port from the fountain
-		for port := range alloc.fountain {
-			if _, err := alloc.Acquire(addr, port); err == nil {
-				return port, nil
-			}
-		}
-		return -1, fmt.Errorf("Port generator ended unexpectedly")
-	}
-	alloc.Lock()
-	defer alloc.Unlock()
-	if _, inUse := alloc.inUse[mapKey]; inUse {
-		return -1, fmt.Errorf("Port already in use: %d", port)
-	}
-	alloc.inUse[mapKey] = struct{}{}
-	return port, nil
-}
-
-func (alloc *PortAllocator) Close() error {
-	alloc.quit <- true
-	close(alloc.quit)
-	close(alloc.fountain)
-	return nil
-}
-
-func newPortAllocator() (*PortAllocator, error) {
-	allocator := &PortAllocator{
-		inUse:    make(map[string]struct{}),
-		fountain: make(chan int),
-		quit:     make(chan bool),
-	}
-	go allocator.runFountain()
-	return allocator, nil
-}
-
 // Network interface represents the networking stack of a container
 type NetworkInterface struct {
 	IPNet   net.IPNet
@@ -390,30 +318,24 @@ func (iface *NetworkInterface) AllocatePort(port Port, binding PortBinding) (*Na
 
 	hostPort, _ := parsePort(nat.Binding.HostPort)
 
-	if nat.Port.Proto() == "tcp" {
-		extPort, err := iface.manager.tcpPortAllocator.Acquire(ip, hostPort)
-		if err != nil {
-			return nil, err
-		}
+	extPort, err := portallocator.RequestPort(ip, nat.Port.Proto(), hostPort)
+	if err != nil {
+		return nil, err
+	}
 
-		backend := &net.TCPAddr{IP: iface.IPNet.IP, Port: containerPort}
-		if err := iface.manager.portMapper.Map(ip, extPort, backend); err != nil {
-			iface.manager.tcpPortAllocator.Release(ip, extPort)
-			return nil, err
-		}
-		nat.Binding.HostPort = strconv.Itoa(extPort)
+	var backend net.Addr
+	if nat.Port.Proto() == "tcp" {
+		backend = &net.TCPAddr{IP: iface.IPNet.IP, Port: containerPort}
 	} else {
-		extPort, err := iface.manager.udpPortAllocator.Acquire(ip, hostPort)
-		if err != nil {
-			return nil, err
-		}
-		backend := &net.UDPAddr{IP: iface.IPNet.IP, Port: containerPort}
-		if err := iface.manager.portMapper.Map(ip, extPort, backend); err != nil {
-			iface.manager.udpPortAllocator.Release(ip, extPort)
-			return nil, err
-		}
-		nat.Binding.HostPort = strconv.Itoa(extPort)
+		backend = &net.UDPAddr{IP: iface.IPNet.IP, Port: containerPort}
 	}
+
+	if err := iface.manager.portMapper.Map(ip, extPort, backend); err != nil {
+		portallocator.ReleasePort(ip, nat.Port.Proto(), extPort)
+		return nil, err
+	}
+
+	nat.Binding.HostPort = strconv.Itoa(extPort)
 	iface.extPorts = append(iface.extPorts, nat)
 
 	return nat, nil
@@ -446,14 +368,8 @@ func (iface *NetworkInterface) Release() {
 			log.Printf("Unable to unmap port %s: %s", nat, err)
 		}
 
-		if nat.Port.Proto() == "tcp" {
-			if err := iface.manager.tcpPortAllocator.Release(ip, hostPort); err != nil {
-				log.Printf("Unable to release port %s", nat)
-			}
-		} else if nat.Port.Proto() == "udp" {
-			if err := iface.manager.udpPortAllocator.Release(ip, hostPort); err != nil {
-				log.Printf("Unable to release port %s: %s", nat, err)
-			}
+		if err := portallocator.ReleasePort(ip, nat.Port.Proto(), hostPort); err != nil {
+			log.Printf("Unable to release port %s", nat)
 		}
 	}
 
@@ -468,9 +384,7 @@ type NetworkManager struct {
 	bridgeIface   string
 	bridgeNetwork *net.IPNet
 
-	tcpPortAllocator *PortAllocator
-	udpPortAllocator *PortAllocator
-	portMapper       *PortMapper
+	portMapper *PortMapper
 
 	disabled bool
 }
@@ -498,21 +412,6 @@ func (manager *NetworkManager) Allocate() (*NetworkInterface, error) {
 	return iface, nil
 }
 
-func (manager *NetworkManager) Close() error {
-	if manager.disabled {
-		return nil
-	}
-	err1 := manager.tcpPortAllocator.Close()
-	err2 := manager.udpPortAllocator.Close()
-	if err1 != nil {
-		return err1
-	}
-	if err2 != nil {
-		return err2
-	}
-	return nil
-}
-
 func newNetworkManager(config *DaemonConfig) (*NetworkManager, error) {
 	if config.BridgeIface == DisableNetworkBridge {
 		manager := &NetworkManager{
@@ -600,27 +499,15 @@ func newNetworkManager(config *DaemonConfig) (*NetworkManager, error) {
 		}
 	}
 
-	tcpPortAllocator, err := newPortAllocator()
-	if err != nil {
-		return nil, err
-	}
-
-	udpPortAllocator, err := newPortAllocator()
-	if err != nil {
-		return nil, err
-	}
-
 	portMapper, err := newPortMapper(config)
 	if err != nil {
 		return nil, err
 	}
 
 	manager := &NetworkManager{
-		bridgeIface:      config.BridgeIface,
-		bridgeNetwork:    network,
-		tcpPortAllocator: tcpPortAllocator,
-		udpPortAllocator: udpPortAllocator,
-		portMapper:       portMapper,
+		bridgeIface:   config.BridgeIface,
+		bridgeNetwork: network,
+		portMapper:    portMapper,
 	}
 
 	return manager, nil

+ 0 - 44
network_test.go

@@ -7,50 +7,6 @@ import (
 	"testing"
 )
 
-func TestPortAllocation(t *testing.T) {
-	ip := net.ParseIP("192.168.0.1")
-	ip2 := net.ParseIP("192.168.0.2")
-	allocator, err := newPortAllocator()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if port, err := allocator.Acquire(ip, 80); err != nil {
-		t.Fatal(err)
-	} else if port != 80 {
-		t.Fatalf("Acquire(80) should return 80, not %d", port)
-	}
-	port, err := allocator.Acquire(ip, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if port <= 0 {
-		t.Fatalf("Acquire(0) should return a non-zero port")
-	}
-	if _, err := allocator.Acquire(ip, port); err == nil {
-		t.Fatalf("Acquiring a port already in use should return an error")
-	}
-	if newPort, err := allocator.Acquire(ip, 0); err != nil {
-		t.Fatal(err)
-	} else if newPort == port {
-		t.Fatalf("Acquire(0) allocated the same port twice: %d", port)
-	}
-	if _, err := allocator.Acquire(ip, 80); err == nil {
-		t.Fatalf("Acquiring a port already in use should return an error")
-	}
-	if _, err := allocator.Acquire(ip2, 80); err != nil {
-		t.Fatalf("It should be possible to allocate the same port on a different interface")
-	}
-	if _, err := allocator.Acquire(ip2, 80); err == nil {
-		t.Fatalf("Acquiring a port already in use should return an error")
-	}
-	if err := allocator.Release(ip, 80); err != nil {
-		t.Fatal(err)
-	}
-	if _, err := allocator.Acquire(ip, 80); err != nil {
-		t.Fatal(err)
-	}
-}
-
 type StubProxy struct {
 	frontendAddr *net.Addr
 	backendAddr  *net.Addr

+ 4 - 3
networkdriver/ipallocator/allocator.go

@@ -4,11 +4,12 @@ import (
 	"encoding/binary"
 	"errors"
 	"github.com/dotcloud/docker/networkdriver"
+	"github.com/dotcloud/docker/pkg/collections"
 	"net"
 	"sync"
 )
 
-type networkSet map[string]*iPSet
+type networkSet map[string]*collections.OrderedIntSet
 
 var (
 	ErrNoAvailableIPs     = errors.New("no available ip addresses on network")
@@ -147,7 +148,7 @@ func intToIP(n int32) *net.IP {
 func checkAddress(address *net.IPNet) {
 	key := address.String()
 	if _, exists := allocatedIPs[key]; !exists {
-		allocatedIPs[key] = &iPSet{}
-		availableIPS[key] = &iPSet{}
+		allocatedIPs[key] = collections.NewOrderedIntSet()
+		availableIPS[key] = collections.NewOrderedIntSet()
 	}
 }

+ 165 - 0
networkdriver/portallocator/portallocator.go

@@ -0,0 +1,165 @@
+package portallocator
+
+import (
+	"errors"
+	"github.com/dotcloud/docker/pkg/collections"
+	"net"
+	"sync"
+)
+
+const (
+	BeginPortRange = 49153
+	EndPortRange   = 65535
+)
+
+type (
+	portMappings map[string]*collections.OrderedIntSet
+	ipMapping    map[string]portMappings
+)
+
+var (
+	ErrPortAlreadyAllocated = errors.New("port has already been allocated")
+	ErrPortExceedsRange     = errors.New("port exceeds upper range")
+	ErrUnknownProtocol      = errors.New("unknown protocol")
+)
+
+var (
+	currentDynamicPort = map[string]int{
+		"tcp": BeginPortRange - 1,
+		"udp": BeginPortRange - 1,
+	}
+	defaultIP             = net.ParseIP("0.0.0.0")
+	defaultAllocatedPorts = portMappings{}
+	otherAllocatedPorts   = ipMapping{}
+	lock                  = sync.Mutex{}
+)
+
+func init() {
+	defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet()
+	defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet()
+}
+
+// RequestPort returns an available port if the port is 0
+// If the provided port is not 0 then it will be checked if
+// it is available for allocation
+func RequestPort(ip net.IP, proto string, port int) (int, error) {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if err := validateProtocol(proto); err != nil {
+		return 0, err
+	}
+
+	// If the user requested a specific port to be allocated
+	if port != 0 {
+		if err := registerSetPort(ip, proto, port); err != nil {
+			return 0, err
+		}
+		return port, nil
+	}
+	return registerDynamicPort(ip, proto)
+}
+
+// ReleasePort will return the provided port back into the
+// pool for reuse
+func ReleasePort(ip net.IP, proto string, port int) error {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if err := validateProtocol(proto); err != nil {
+		return err
+	}
+
+	allocated := defaultAllocatedPorts[proto]
+	allocated.Remove(port)
+
+	if !equalsDefault(ip) {
+		registerIP(ip)
+
+		// Remove the port for the specific ip address
+		allocated = otherAllocatedPorts[ip.String()][proto]
+		allocated.Remove(port)
+	}
+	return nil
+}
+
+func ReleaseAll() error {
+	lock.Lock()
+	defer lock.Unlock()
+
+	currentDynamicPort["tcp"] = BeginPortRange - 1
+	currentDynamicPort["udp"] = BeginPortRange - 1
+
+	defaultAllocatedPorts = portMappings{}
+	defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet()
+	defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet()
+
+	otherAllocatedPorts = ipMapping{}
+
+	return nil
+}
+
+func registerDynamicPort(ip net.IP, proto string) (int, error) {
+	allocated := defaultAllocatedPorts[proto]
+
+	port := nextPort(proto)
+	if port > EndPortRange {
+		return 0, ErrPortExceedsRange
+	}
+
+	if !equalsDefault(ip) {
+		registerIP(ip)
+
+		ipAllocated := otherAllocatedPorts[ip.String()][proto]
+		ipAllocated.Push(port)
+	} else {
+		allocated.Push(port)
+	}
+	return port, nil
+}
+
+func registerSetPort(ip net.IP, proto string, port int) error {
+	allocated := defaultAllocatedPorts[proto]
+	if allocated.Exists(port) {
+		return ErrPortAlreadyAllocated
+	}
+
+	if !equalsDefault(ip) {
+		registerIP(ip)
+
+		ipAllocated := otherAllocatedPorts[ip.String()][proto]
+		if ipAllocated.Exists(port) {
+			return ErrPortAlreadyAllocated
+		}
+		ipAllocated.Push(port)
+	} else {
+		allocated.Push(port)
+	}
+	return nil
+}
+
+func equalsDefault(ip net.IP) bool {
+	return ip == nil || ip.Equal(defaultIP)
+}
+
+func nextPort(proto string) int {
+	c := currentDynamicPort[proto] + 1
+	currentDynamicPort[proto] = c
+	return c
+}
+
+func registerIP(ip net.IP) {
+	if _, exists := otherAllocatedPorts[ip.String()]; !exists {
+		otherAllocatedPorts[ip.String()] = portMappings{
+			"tcp": collections.NewOrderedIntSet(),
+			"udp": collections.NewOrderedIntSet(),
+		}
+	}
+}
+
+func validateProtocol(proto string) error {
+	if _, exists := defaultAllocatedPorts[proto]; !exists {
+		return ErrUnknownProtocol
+	}
+	return nil
+}

+ 184 - 0
networkdriver/portallocator/portallocator_test.go

@@ -0,0 +1,184 @@
+package portallocator
+
+import (
+	"net"
+	"testing"
+)
+
+func reset() {
+	ReleaseAll()
+}
+
+func TestRequestNewPort(t *testing.T) {
+	defer reset()
+
+	port, err := RequestPort(defaultIP, "tcp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if expected := BeginPortRange; port != expected {
+		t.Fatalf("Expected port %d got %d", expected, port)
+	}
+}
+
+func TestRequestSpecificPort(t *testing.T) {
+	defer reset()
+
+	port, err := RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port != 5000 {
+		t.Fatalf("Expected port 5000 got %d", port)
+	}
+}
+
+func TestReleasePort(t *testing.T) {
+	defer reset()
+
+	port, err := RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port != 5000 {
+		t.Fatalf("Expected port 5000 got %d", port)
+	}
+
+	if err := ReleasePort(defaultIP, "tcp", 5000); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestReuseReleasedPort(t *testing.T) {
+	defer reset()
+
+	port, err := RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port != 5000 {
+		t.Fatalf("Expected port 5000 got %d", port)
+	}
+
+	if err := ReleasePort(defaultIP, "tcp", 5000); err != nil {
+		t.Fatal(err)
+	}
+
+	port, err = RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestReleaseUnreadledPort(t *testing.T) {
+	defer reset()
+
+	port, err := RequestPort(defaultIP, "tcp", 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port != 5000 {
+		t.Fatalf("Expected port 5000 got %d", port)
+	}
+
+	port, err = RequestPort(defaultIP, "tcp", 5000)
+	if err != ErrPortAlreadyAllocated {
+		t.Fatalf("Expected error %s got %s", ErrPortAlreadyAllocated, err)
+	}
+}
+
+func TestUnknowProtocol(t *testing.T) {
+	defer reset()
+
+	if _, err := RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol {
+		t.Fatalf("Expected error %s got %s", ErrUnknownProtocol, err)
+	}
+}
+
+func TestAllocateAllPorts(t *testing.T) {
+	defer reset()
+
+	for i := 0; i <= EndPortRange-BeginPortRange; i++ {
+		port, err := RequestPort(defaultIP, "tcp", 0)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if expected := BeginPortRange + i; port != expected {
+			t.Fatalf("Expected port %d got %d", expected, port)
+		}
+	}
+
+	if _, err := RequestPort(defaultIP, "tcp", 0); err != ErrPortExceedsRange {
+		t.Fatalf("Expected error %s got %s", ErrPortExceedsRange, err)
+	}
+
+	_, err := RequestPort(defaultIP, "udp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func BenchmarkAllocatePorts(b *testing.B) {
+	defer reset()
+
+	for i := 0; i < b.N; i++ {
+		for i := 0; i <= EndPortRange-BeginPortRange; i++ {
+			port, err := RequestPort(defaultIP, "tcp", 0)
+			if err != nil {
+				b.Fatal(err)
+			}
+
+			if expected := BeginPortRange + i; port != expected {
+				b.Fatalf("Expected port %d got %d", expected, port)
+			}
+		}
+		reset()
+	}
+}
+
+func TestPortAllocation(t *testing.T) {
+	defer reset()
+
+	ip := net.ParseIP("192.168.0.1")
+	ip2 := net.ParseIP("192.168.0.2")
+	if port, err := RequestPort(ip, "tcp", 80); err != nil {
+		t.Fatal(err)
+	} else if port != 80 {
+		t.Fatalf("Acquire(80) should return 80, not %d", port)
+	}
+	port, err := RequestPort(ip, "tcp", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if port <= 0 {
+		t.Fatalf("Acquire(0) should return a non-zero port")
+	}
+
+	if _, err := RequestPort(ip, "tcp", port); err == nil {
+		t.Fatalf("Acquiring a port already in use should return an error")
+	}
+
+	if newPort, err := RequestPort(ip, "tcp", 0); err != nil {
+		t.Fatal(err)
+	} else if newPort == port {
+		t.Fatalf("Acquire(0) allocated the same port twice: %d", port)
+	}
+
+	if _, err := RequestPort(ip, "tcp", 80); err == nil {
+		t.Fatalf("Acquiring a port already in use should return an error")
+	}
+	if _, err := RequestPort(ip2, "tcp", 80); err != nil {
+		t.Fatalf("It should be possible to allocate the same port on a different interface")
+	}
+	if _, err := RequestPort(ip2, "tcp", 80); err == nil {
+		t.Fatalf("Acquiring a port already in use should return an error")
+	}
+	if err := ReleasePort(ip, "tcp", 80); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := RequestPort(ip, "tcp", 80); err != nil {
+		t.Fatal(err)
+	}
+}

+ 24 - 12
networkdriver/ipallocator/ipset.go → pkg/collections/orderedintset.go

@@ -1,18 +1,22 @@
-package ipallocator
+package collections
 
 import (
-	"sort"
 	"sync"
 )
 
-// iPSet is a thread-safe sorted set and a stack.
-type iPSet struct {
+// OrderedIntSet is a thread-safe sorted set and a stack.
+type OrderedIntSet struct {
 	sync.RWMutex
 	set []int
 }
 
+// NewOrderedIntSet returns an initialized OrderedIntSet
+func NewOrderedIntSet() *OrderedIntSet {
+	return &OrderedIntSet{}
+}
+
 // Push adds an element to the set. If the element already exists, it has no effect.
-func (s *iPSet) Push(elem int) {
+func (s *OrderedIntSet) Push(elem int) {
 	s.RLock()
 	for _, e := range s.set {
 		if e == elem {
@@ -23,20 +27,28 @@ func (s *iPSet) Push(elem int) {
 	s.RUnlock()
 
 	s.Lock()
-	s.set = append(s.set, elem)
+
 	// Make sure the list is always sorted
-	sort.Ints(s.set)
+	for i, e := range s.set {
+		if elem < e {
+			s.set = append(s.set[:i], append([]int{elem}, s.set[i:]...)...)
+			s.Unlock()
+			return
+		}
+	}
+	// If we reach here, then elem is the biggest elem of the list.
+	s.set = append(s.set, elem)
 	s.Unlock()
 }
 
 // Pop is an alias for PopFront().
-func (s *iPSet) Pop() int {
+func (s *OrderedIntSet) Pop() int {
 	return s.PopFront()
 }
 
 // PopFront returns the first element from the list and removes it.
 // If the list is empty, it returns 0.
-func (s *iPSet) PopFront() int {
+func (s *OrderedIntSet) PopFront() int {
 	s.RLock()
 
 	for i, e := range s.set {
@@ -55,7 +67,7 @@ func (s *iPSet) PopFront() int {
 // PullBack retrieves the last element of the list.
 // The element is not removed.
 // If the list is empty, an empty element is returned.
-func (s *iPSet) PullBack() int {
+func (s *OrderedIntSet) PullBack() int {
 	if len(s.set) == 0 {
 		return 0
 	}
@@ -63,7 +75,7 @@ func (s *iPSet) PullBack() int {
 }
 
 // Exists checks if the given element present in the list.
-func (s *iPSet) Exists(elem int) bool {
+func (s *OrderedIntSet) Exists(elem int) bool {
 	for _, e := range s.set {
 		if e == elem {
 			return true
@@ -74,7 +86,7 @@ func (s *iPSet) Exists(elem int) bool {
 
 // Remove removes an element from the list.
 // If the element is not found, it has no effect.
-func (s *iPSet) Remove(elem int) {
+func (s *OrderedIntSet) Remove(elem int) {
 	for i, e := range s.set {
 		if e == elem {
 			s.set = append(s.set[:i], s.set[i+1:]...)

+ 3 - 2
runtime.go

@@ -11,6 +11,7 @@ import (
 	"github.com/dotcloud/docker/graphdriver/aufs"
 	_ "github.com/dotcloud/docker/graphdriver/devmapper"
 	_ "github.com/dotcloud/docker/graphdriver/vfs"
+	"github.com/dotcloud/docker/networkdriver/portallocator"
 	"github.com/dotcloud/docker/pkg/graphdb"
 	"github.com/dotcloud/docker/pkg/sysinfo"
 	"github.com/dotcloud/docker/utils"
@@ -740,8 +741,8 @@ func NewRuntimeFromDirectory(config *DaemonConfig) (*Runtime, error) {
 
 func (runtime *Runtime) Close() error {
 	errorsStrings := []string{}
-	if err := runtime.networkManager.Close(); err != nil {
-		utils.Errorf("runtime.networkManager.Close(): %s", err.Error())
+	if err := portallocator.ReleaseAll(); err != nil {
+		utils.Errorf("portallocator.ReleaseAll(): %s", err)
 		errorsStrings = append(errorsStrings, err.Error())
 	}
 	if err := runtime.driver.Cleanup(); err != nil {