Restrict portallocator to Docker allocated ports
Port allocation status is stored in a global map: a port detected as in use will remain marked as such for the lifetime of the daemon. Change the behavior to mark as allocated only those ports which are claimed by Docker itself (which we can trust to properly remove from the allocation map once released). Ports allocated by other applications will always be retried, to account for the eventuality of the port having been released in the meantime.

Docker-DCO-1.1-Signed-off-by: Arnaud Porterie <icecrime@gmail.com> (github: icecrime)
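
The policy described above can be summed up in a few lines of Go. The sketch below is purely illustrative: the toyAllocator type and its Request/Release methods are invented for this example and are not the daemon's portallocator API. Only ports claimed through the allocator are remembered as in use, a claim is forgotten as soon as it is released, and a bind failure caused by a foreign application leaves no trace, so the port is simply retried later.

package main

import (
	"errors"
	"fmt"
	"net"
)

// toyAllocator is a hypothetical stand-in for the daemon's port allocator.
// It only remembers ports that were claimed through it (i.e. by Docker);
// ports held by other applications are never recorded, so they are retried
// on every subsequent allocation request.
type toyAllocator struct {
	claimed map[string]bool
}

var errPortAllocated = errors.New("port is already allocated")

func key(ip net.IP, proto string, port int) string {
	return fmt.Sprintf("%s/%d/%s", ip.String(), port, proto)
}

// Request marks the port as in use only when the claim succeeds.
func (a *toyAllocator) Request(ip net.IP, proto string, port int) (int, error) {
	k := key(ip, proto, port)
	if a.claimed[k] {
		return 0, errPortAllocated
	}
	a.claimed[k] = true
	return port, nil
}

// Release forgets a Docker-claimed port so it can be handed out again.
func (a *toyAllocator) Release(ip net.IP, proto string, port int) {
	delete(a.claimed, key(ip, proto, port))
}

func main() {
	alloc := &toyAllocator{claimed: make(map[string]bool)}
	ip := net.ParseIP("127.0.0.1")

	alloc.Request(ip, "tcp", 8080)           // Docker claims the port
	_, err := alloc.Request(ip, "tcp", 8080) // a second claim is refused
	fmt.Println("duplicate claim:", err)

	alloc.Release(ip, "tcp", 8080)          // Docker releases it
	_, err = alloc.Request(ip, "tcp", 8080) // and may claim it again
	fmt.Println("claim after release:", err)
}
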
This commit is contained in:
parent 510f1ba438
commit dafddf461e

4 changed files with 227 additions and 65 deletions

@@ -11,7 +11,6 @@ import (
 	"github.com/docker/libcontainer/netlink"
 	"github.com/dotcloud/docker/daemon/networkdriver"
 	"github.com/dotcloud/docker/daemon/networkdriver/ipallocator"
-	"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
 	"github.com/dotcloud/docker/daemon/networkdriver/portmapper"
 	"github.com/dotcloud/docker/engine"
 	"github.com/dotcloud/docker/pkg/iptables"

@@ -354,9 +353,6 @@ func Release(job *engine.Job) engine.Status {
 	var (
 		id                 = job.Args[0]
 		containerInterface = currentInterfaces.Get(id)
-		ip                 net.IP
-		port               int
-		proto              string
 	)
 
 	if containerInterface == nil {

@@ -367,22 +363,6 @@ func Release(job *engine.Job) engine.Status {
 		if err := portmapper.Unmap(nat); err != nil {
 			log.Printf("Unable to unmap port %s: %s", nat, err)
 		}
-
-		// this is host mappings
-		switch a := nat.(type) {
-		case *net.TCPAddr:
-			proto = "tcp"
-			ip = a.IP
-			port = a.Port
-		case *net.UDPAddr:
-			proto = "udp"
-			ip = a.IP
-			port = a.Port
-		}
-
-		if err := portallocator.ReleasePort(ip, proto, port); err != nil {
-			log.Printf("Unable to release port %s", nat)
-		}
 	}
 
 	if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil {

@@ -399,7 +379,7 @@ func AllocatePort(job *engine.Job) engine.Status {
 		ip            = defaultBindingIP
 		id            = job.Args[0]
 		hostIP        = job.Getenv("HostIP")
-		origHostPort  = job.GetenvInt("HostPort")
+		hostPort      = job.GetenvInt("HostPort")
 		containerPort = job.GetenvInt("ContainerPort")
 		proto         = job.Getenv("Proto")
 		network       = currentInterfaces.Get(id)

@@ -409,11 +389,16 @@ func AllocatePort(job *engine.Job) engine.Status {
 		ip = net.ParseIP(hostIP)
 	}
 
-	var (
-		hostPort  int
-		container net.Addr
-		host      net.Addr
-	)
+	// host ip, proto, and host port
+	var container net.Addr
+	switch proto {
+	case "tcp":
+		container = &net.TCPAddr{IP: network.IP, Port: containerPort}
+	case "udp":
+		container = &net.UDPAddr{IP: network.IP, Port: containerPort}
+	default:
+		return job.Errorf("unsupported address type %s", proto)
+	}
 
 	/*
 		Try up to 10 times to get a port that's not already allocated.

@@ -421,27 +406,22 @@ func AllocatePort(job *engine.Job) engine.Status {
 		In the event of failure to bind, return the error that portmapper.Map
 		yields.
 	*/
+
+	var host net.Addr
 	for i := 0; i < 10; i++ {
-		// host ip, proto, and host port
-		hostPort, err = portallocator.RequestPort(ip, proto, origHostPort)
-
-		if err != nil {
-			return job.Error(err)
-		}
-
-		if proto == "tcp" {
-			host = &net.TCPAddr{IP: ip, Port: hostPort}
-			container = &net.TCPAddr{IP: network.IP, Port: containerPort}
-		} else {
-			host = &net.UDPAddr{IP: ip, Port: hostPort}
-			container = &net.UDPAddr{IP: network.IP, Port: containerPort}
-		}
-
-		if err = portmapper.Map(container, ip, hostPort); err == nil {
+		if host, err = portmapper.Map(container, ip, hostPort); err == nil {
 			break
 		}
 
-		job.Logf("Failed to bind %s:%d for container address %s:%d. Trying another port.", ip.String(), hostPort, network.IP.String(), containerPort)
+		// There is no point in immediately retrying to map an explicitely
+		// chosen port.
+		if hostPort != 0 {
+			job.Logf("Failed to bind %s for container address %s", host.String(), container.String())
+			break
+		}
+
+		// Automatically chosen 'free' port failed to bind: move on the next.
+		job.Logf("Failed to bind %s for container address %s. Trying another port.", host.String(), container.String())
 	}
 
 	if err != nil {

@@ -451,12 +431,18 @@ func AllocatePort(job *engine.Job) engine.Status {
 	network.PortMappings = append(network.PortMappings, host)
 
 	out := engine.Env{}
-	out.Set("HostIP", ip.String())
-	out.SetInt("HostPort", hostPort)
 
+	switch netAddr := host.(type) {
+	case *net.TCPAddr:
+		out.Set("HostIP", netAddr.IP.String())
+		out.SetInt("HostPort", netAddr.Port)
+	case *net.UDPAddr:
+		out.Set("HostIP", netAddr.IP.String())
+		out.SetInt("HostPort", netAddr.Port)
+	}
 	if _, err := out.WriteTo(job.Stdout); err != nil {
 		return job.Error(err)
 	}
 
 	return engine.StatusOK
 }
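
The comments in the retry loop above capture a small reusable pattern: a bind failure is only worth retrying when the host port was chosen automatically. The self-contained sketch below isolates that pattern under stated assumptions: bindOnce and bindWithRetry are hypothetical names, and bindOnce merely opens a TCP listener as a stand-in for portmapper.Map.

package main

import (
	"fmt"
	"net"
)

// bindOnce is a hypothetical stand-in for portmapper.Map: it simply tries to
// bind a TCP listener on the requested host IP and port.
func bindOnce(ip net.IP, port int) (net.Listener, error) {
	return net.ListenTCP("tcp", &net.TCPAddr{IP: ip, Port: port})
}

// bindWithRetry mirrors the policy of the AllocatePort loop above: a port the
// caller asked for explicitly is attempted once, while an automatically
// chosen port (requested as 0) is retried a bounded number of times.
func bindWithRetry(ip net.IP, requestedPort int) (l net.Listener, err error) {
	for i := 0; i < 10; i++ {
		if l, err = bindOnce(ip, requestedPort); err == nil {
			return l, nil
		}
		// No point in retrying an explicitly chosen port: it is held by
		// another process, so report the failure to the caller.
		if requestedPort != 0 {
			break
		}
		// An automatically chosen port failed to bind: try another one.
	}
	return nil, err
}

func main() {
	l, err := bindWithRetry(net.ParseIP("127.0.0.1"), 0)
	if err != nil {
		fmt.Println("bind failed:", err)
		return
	}
	defer l.Close()
	fmt.Println("bound", l.Addr())
}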

daemon/networkdriver/bridge/driver_test.go (new file, 106 additions)

@@ -0,0 +1,106 @@
+package bridge
+
+import (
+	"fmt"
+	"net"
+	"strconv"
+	"testing"
+
+	"github.com/dotcloud/docker/engine"
+)
+
+func findFreePort(t *testing.T) int {
+	l, err := net.Listen("tcp", ":0")
+	if err != nil {
+		t.Fatal("Failed to find a free port")
+	}
+	defer l.Close()
+
+	result, err := net.ResolveTCPAddr("tcp", l.Addr().String())
+	if err != nil {
+		t.Fatal("Failed to resolve address to identify free port")
+	}
+	return result.Port
+}
+
+func newPortAllocationJob(eng *engine.Engine, port int) (job *engine.Job) {
+	strPort := strconv.Itoa(port)
+
+	job = eng.Job("allocate_port", "container_id")
+	job.Setenv("HostIP", "127.0.0.1")
+	job.Setenv("HostPort", strPort)
+	job.Setenv("Proto", "tcp")
+	job.Setenv("ContainerPort", strPort)
+	return
+}
+
+func TestAllocatePortDetection(t *testing.T) {
+	eng := engine.New()
+	eng.Logging = false
+
+	freePort := findFreePort(t)
+
+	// Init driver
+	job := eng.Job("initdriver")
+	if res := InitDriver(job); res != engine.StatusOK {
+		t.Fatal("Failed to initialize network driver")
+	}
+
+	// Allocate interface
+	job = eng.Job("allocate_interface", "container_id")
+	if res := Allocate(job); res != engine.StatusOK {
+		t.Fatal("Failed to allocate network interface")
+	}
+
+	// Allocate same port twice, expect failure on second call
+	job = newPortAllocationJob(eng, freePort)
+	if res := AllocatePort(job); res != engine.StatusOK {
+		t.Fatal("Failed to find a free port to allocate")
+	}
+	if res := AllocatePort(job); res == engine.StatusOK {
+		t.Fatal("Duplicate port allocation granted by AllocatePort")
+	}
+}
+
+func TestAllocatePortReclaim(t *testing.T) {
+	eng := engine.New()
+	eng.Logging = false
+
+	freePort := findFreePort(t)
+
+	// Init driver
+	job := eng.Job("initdriver")
+	if res := InitDriver(job); res != engine.StatusOK {
+		t.Fatal("Failed to initialize network driver")
+	}
+
+	// Allocate interface
+	job = eng.Job("allocate_interface", "container_id")
+	if res := Allocate(job); res != engine.StatusOK {
+		t.Fatal("Failed to allocate network interface")
+	}
+
+	// Occupy port
+	listenAddr := fmt.Sprintf(":%d", freePort)
+	tcpListenAddr, err := net.ResolveTCPAddr("tcp", listenAddr)
+	if err != nil {
+		t.Fatalf("Failed to resolve TCP address '%s'", listenAddr)
+	}
+
+	l, err := net.ListenTCP("tcp", tcpListenAddr)
+	if err != nil {
+		t.Fatalf("Fail to listen on port %d", freePort)
+	}
+
+	// Allocate port, expect failure
+	job = newPortAllocationJob(eng, freePort)
+	if res := AllocatePort(job); res == engine.StatusOK {
+		t.Fatal("Successfully allocated currently used port")
+	}
+
+	// Reclaim port, retry allocation
+	l.Close()
+	if res := AllocatePort(job); res != engine.StatusOK {
+		t.Fatal("Failed to allocate previously reclaimed port")
+	}
+}

@@ -3,10 +3,12 @@ package portmapper
 import (
 	"errors"
 	"fmt"
-	"github.com/dotcloud/docker/pkg/iptables"
-	"github.com/dotcloud/docker/pkg/proxy"
 	"net"
 	"sync"
+
+	"github.com/dotcloud/docker/daemon/networkdriver/portallocator"
+	"github.com/dotcloud/docker/pkg/iptables"
+	"github.com/dotcloud/docker/pkg/proxy"
 )
 
 type mapping struct {

@@ -31,47 +33,87 @@ var (
 	ErrPortNotMapped = errors.New("port is not mapped")
 )
 
+type genericAddr struct {
+	IP   net.IP
+	Port int
+}
+
+func (g *genericAddr) Network() string {
+	return ""
+}
+
+func (g *genericAddr) String() string {
+	return fmt.Sprintf("%s:%d", g.IP.String(), g.Port)
+}
+
 func SetIptablesChain(c *iptables.Chain) {
 	chain = c
 }
 
-func Map(container net.Addr, hostIP net.IP, hostPort int) error {
+func Map(container net.Addr, hostIP net.IP, hostPort int) (net.Addr, error) {
 	lock.Lock()
 	defer lock.Unlock()
 
-	var m *mapping
+	var (
+		m                 *mapping
+		err               error
+		proto             string
+		allocatedHostPort int
+	)
+
 	switch container.(type) {
 	case *net.TCPAddr:
+		proto = "tcp"
+		if allocatedHostPort, err = portallocator.RequestPort(hostIP, proto, hostPort); err != nil {
+			return &net.TCPAddr{IP: hostIP, Port: hostPort}, err
+		}
 		m = &mapping{
-			proto:     "tcp",
-			host:      &net.TCPAddr{IP: hostIP, Port: hostPort},
+			proto:     proto,
+			host:      &net.TCPAddr{IP: hostIP, Port: allocatedHostPort},
 			container: container,
 		}
 	case *net.UDPAddr:
+		proto = "udp"
+		if allocatedHostPort, err = portallocator.RequestPort(hostIP, proto, hostPort); err != nil {
+			return &net.UDPAddr{IP: hostIP, Port: hostPort}, err
+		}
 		m = &mapping{
-			proto:     "udp",
-			host:      &net.UDPAddr{IP: hostIP, Port: hostPort},
+			proto:     proto,
+			host:      &net.UDPAddr{IP: hostIP, Port: allocatedHostPort},
 			container: container,
 		}
 	default:
-		return ErrUnknownBackendAddressType
+		// Always return a proper net.Addr for proper reporting.
+		return &genericAddr{IP: hostIP, Port: hostPort}, ErrUnknownBackendAddressType
 	}
 
+	// When binding fails:
+	// - for a specifically requested port: it might be locked by some other
+	//   process, so we want to allow for an ulterior retry
+	// - for an automatically allocated port: it falls in the Docker range of
+	//   ports, so we'll just remember it as used and try the next free one
+	defer func() {
+		if err != nil && hostPort != 0 {
+			portallocator.ReleasePort(hostIP, proto, allocatedHostPort)
+		}
+	}()
+
 	key := getKey(m.host)
 	if _, exists := currentMappings[key]; exists {
-		return ErrPortMappedForIP
+		err = ErrPortMappedForIP
+		return m.host, err
 	}
 
 	containerIP, containerPort := getIPAndPort(m.container)
-	if err := forward(iptables.Add, m.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
-		return err
+	if err := forward(iptables.Add, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil {
+		return m.host, err
 	}
 
 	p, err := newProxy(m.host, m.container)
 	if err != nil {
 		// need to undo the iptables rules before we reutrn
-		forward(iptables.Delete, m.proto, hostIP, hostPort, containerIP.String(), containerPort)
-		return err
+		forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort)
+		return m.host, err
 	}
 
 	m.userlandProxy = p

@@ -79,7 +121,7 @@ func Map(container net.Addr, hostIP net.IP, hostPort int) error {
 
 	go p.Run()
 
-	return nil
+	return m.host, nil
 }
 
 func Unmap(host net.Addr) error {

@@ -100,6 +142,18 @@ func Unmap(host net.Addr) error {
 	if err := forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
 		return err
 	}
+
+	switch a := host.(type) {
+	case *net.TCPAddr:
+		if err := portallocator.ReleasePort(a.IP, "tcp", a.Port); err != nil {
+			return err
+		}
+	case *net.UDPAddr:
+		if err := portallocator.ReleasePort(a.IP, "udp", a.Port); err != nil {
+			return err
+		}
+	}
 
 	return nil
 }
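
For callers, the visible change in this file is the Map signature: it now returns the host address it attempted to bind (a proper net.Addr even on failure, via the genericAddr fallback), and Unmap releases the host port back to portallocator. A usage sketch of the API as it appears in this diff follows; it assumes the packages from this commit are importable and the usual daemon prerequisites (iptables, the userland proxy) are available, and the addresses and ports are arbitrary examples.

package main

import (
	"log"
	"net"

	"github.com/dotcloud/docker/daemon/networkdriver/portmapper"
)

func main() {
	container := &net.TCPAddr{IP: net.ParseIP("172.17.0.2"), Port: 80}
	hostIP := net.ParseIP("127.0.0.1")

	// Map now returns the host address it tried to bind, even on failure,
	// so the caller always has something meaningful to report.
	host, err := portmapper.Map(container, hostIP, 8080)
	if err != nil {
		log.Fatalf("Failed to bind %s for container address %s: %s", host, container, err)
	}
	log.Printf("Mapped %s -> %s", host, container)

	// Unmap tears down the mapping and, with this change, also releases the
	// host port back to the port allocator.
	if err := portmapper.Unmap(host); err != nil {
		log.Printf("Unable to unmap %s: %s", host, err)
	}
}
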

@@ -44,20 +44,36 @@ func TestMapPorts(t *testing.T) {
 	srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
 	srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")}
 
-	if err := Map(srcAddr1, dstIp1, 80); err != nil {
+	addrEqual := func(addr1, addr2 net.Addr) bool {
+		return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())
+	}
+
+	if host, err := Map(srcAddr1, dstIp1, 80); err != nil {
 		t.Fatalf("Failed to allocate port: %s", err)
+	} else if !addrEqual(dstAddr1, host) {
+		t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
+			dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())
 	}
 
-	if Map(srcAddr1, dstIp1, 80) == nil {
+	if host, err := Map(srcAddr1, dstIp1, 80); err == nil {
 		t.Fatalf("Port is in use - mapping should have failed")
+	} else if !addrEqual(dstAddr1, host) {
+		t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
+			dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())
 	}
 
-	if Map(srcAddr2, dstIp1, 80) == nil {
+	if host, err := Map(srcAddr2, dstIp1, 80); err == nil {
 		t.Fatalf("Port is in use - mapping should have failed")
+	} else if !addrEqual(dstAddr1, host) {
+		t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
+			dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())
 	}
 
-	if err := Map(srcAddr2, dstIp2, 80); err != nil {
+	if host, err := Map(srcAddr2, dstIp2, 80); err != nil {
 		t.Fatalf("Failed to allocate port: %s", err)
+	} else if !addrEqual(dstAddr2, host) {
+		t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
+			dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())
 	}
 
 	if Unmap(dstAddr1) != nil {