Merge pull request #24237 from aaronlehmann/listen-addr

Split advertised address from listen address; change address detection strategy
Aaron Lehmann 2016-07-24 11:03:14 -07:00 committed by GitHub
commit 9c1be541ff
76 changed files with 2306 additions and 659 deletions
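In short, this change splits the address a node binds to from the address it advertises: `--listen-addr` keeps controlling the bind address, while the new `--advertise-addr` (and the daemon-level `--swarm-default-advertise-addr`) controls what is published to other swarm members, and autodetection now refuses to guess on multi-homed hosts. An illustrative invocation with the new flag (addresses are placeholders):

```bash
# Bind on all interfaces, but advertise one specific, externally reachable address.
docker swarm init --listen-addr 0.0.0.0:2377 --advertise-addr 192.168.99.100:2377
```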


@ -1,7 +1,9 @@
package swarm package swarm
import ( import (
"errors"
"fmt" "fmt"
"strings"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -21,7 +23,9 @@ const (
type initOptions struct { type initOptions struct {
swarmOptions swarmOptions
listenAddr NodeAddrOption listenAddr NodeAddrOption
// Not a NodeAddrOption because it has no default port.
advertiseAddr string
forceNewCluster bool forceNewCluster bool
} }
@ -40,7 +44,8 @@ func newInitCommand(dockerCli *client.DockerCli) *cobra.Command {
} }
flags := cmd.Flags() flags := cmd.Flags()
flags.Var(&opts.listenAddr, "listen-addr", "Listen address") flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: <ip|interface>[:port])")
flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: <ip|interface>[:port])")
flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state.") flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state.")
addSwarmFlags(flags, &opts.swarmOptions) addSwarmFlags(flags, &opts.swarmOptions)
return cmd return cmd
@ -52,12 +57,16 @@ func runInit(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts initOptions
req := swarm.InitRequest{ req := swarm.InitRequest{
ListenAddr: opts.listenAddr.String(), ListenAddr: opts.listenAddr.String(),
AdvertiseAddr: opts.advertiseAddr,
ForceNewCluster: opts.forceNewCluster, ForceNewCluster: opts.forceNewCluster,
Spec: opts.swarmOptions.ToSpec(), Spec: opts.swarmOptions.ToSpec(),
} }
nodeID, err := client.SwarmInit(ctx, req) nodeID, err := client.SwarmInit(ctx, req)
if err != nil { if err != nil {
if strings.Contains(err.Error(), "could not choose an IP address to advertise") || strings.Contains(err.Error(), "could not find the system's IP address") {
return errors.New(err.Error() + " - specify one with --advertise-addr")
}
return err return err
} }


@ -14,7 +14,9 @@ import (
type joinOptions struct { type joinOptions struct {
remote string remote string
listenAddr NodeAddrOption listenAddr NodeAddrOption
token string // Not a NodeAddrOption because it has no default port.
advertiseAddr string
token string
} }
func newJoinCommand(dockerCli *client.DockerCli) *cobra.Command { func newJoinCommand(dockerCli *client.DockerCli) *cobra.Command {
@ -33,7 +35,8 @@ func newJoinCommand(dockerCli *client.DockerCli) *cobra.Command {
} }
flags := cmd.Flags() flags := cmd.Flags()
flags.Var(&opts.listenAddr, flagListenAddr, "Listen address") flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: <ip|interface>[:port])")
flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: <ip|interface>[:port])")
flags.StringVar(&opts.token, flagToken, "", "Token for entry into the swarm") flags.StringVar(&opts.token, flagToken, "", "Token for entry into the swarm")
return cmd return cmd
} }
@ -43,9 +46,10 @@ func runJoin(dockerCli *client.DockerCli, opts joinOptions) error {
ctx := context.Background() ctx := context.Background()
req := swarm.JoinRequest{ req := swarm.JoinRequest{
JoinToken: opts.token, JoinToken: opts.token,
ListenAddr: opts.listenAddr.String(), ListenAddr: opts.listenAddr.String(),
RemoteAddrs: []string{opts.remote}, AdvertiseAddr: opts.advertiseAddr,
RemoteAddrs: []string{opts.remote},
} }
err := client.SwarmJoin(ctx, req) err := client.SwarmJoin(ctx, req)
if err != nil { if err != nil {


@ -18,6 +18,7 @@ const (
flagCertExpiry = "cert-expiry" flagCertExpiry = "cert-expiry"
flagDispatcherHeartbeat = "dispatcher-heartbeat" flagDispatcherHeartbeat = "dispatcher-heartbeat"
flagListenAddr = "listen-addr" flagListenAddr = "listen-addr"
flagAdvertiseAddr = "advertise-addr"
flagToken = "token" flagToken = "token"
flagTaskHistoryLimit = "task-history-limit" flagTaskHistoryLimit = "task-history-limit"
flagExternalCA = "external-ca" flagExternalCA = "external-ca"


@ -86,6 +86,7 @@ func runInfo(dockerCli *client.DockerCli) error {
fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers) fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers)
fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes) fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes)
} }
fmt.Fprintf(dockerCli.Out(), " Node Address: %s\n", info.Swarm.NodeAddr)
} }
if len(info.Runtimes) > 0 { if len(info.Runtimes) > 0 {
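The advertised address now also shows up in `docker info` as `Node Address`. Abridged, illustrative output for a one-node swarm (surrounding fields elided, address is a placeholder):

```bash
$ docker info
...
Swarm: active
 ...
 Managers: 1
 Nodes: 1
 Node Address: 192.168.99.100
...
```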


@ -274,9 +274,11 @@ func (cli *DaemonCli) start() (err error) {
name, _ := os.Hostname() name, _ := os.Hostname()
c, err := cluster.New(cluster.Config{ c, err := cluster.New(cluster.Config{
Root: cli.Config.Root, Root: cli.Config.Root,
Name: name, Name: name,
Backend: d, Backend: d,
NetworkSubnetsProvider: d,
DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr,
}) })
if err != nil { if err != nil {
logrus.Fatalf("Error creating cluster component: %v", err) logrus.Fatalf("Error creating cluster component: %v", err)


@ -1839,11 +1839,17 @@ _docker_swarm_init() {
fi fi
return return
;; ;;
--advertise-addr)
if [[ $cur == *: ]] ; then
COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) )
fi
return
;;
esac esac
case "$cur" in case "$cur" in
-*) -*)
COMPREPLY=( $( compgen -W "--force-new-cluster --help --listen-addr" -- "$cur" ) ) COMPREPLY=( $( compgen -W "--advertise-addr --force-new-cluster --help --listen-addr" -- "$cur" ) )
;; ;;
esac esac
} }
@ -1873,11 +1879,17 @@ _docker_swarm_join() {
fi fi
return return
;; ;;
--advertise-addr)
if [[ $cur == *: ]] ; then
COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) )
fi
return
;;
esac esac
case "$cur" in case "$cur" in
-*) -*)
COMPREPLY=( $( compgen -W "--help --listen-addr --token" -- "$cur" ) ) COMPREPLY=( $( compgen -W "--adveritse-addr --help --listen-addr --token" -- "$cur" ) )
;; ;;
*:) *:)
COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) )


@ -1203,6 +1203,7 @@ __docker_swarm_subcommand() {
(init) (init)
_arguments $(__docker_arguments) \ _arguments $(__docker_arguments) \
$opts_help \ $opts_help \
"($help)--advertise-addr[Advertised address]:ip\:port: " \
"($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \ "($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \
"($help)--force-new-cluster[Force create a new cluster from current state]" \ "($help)--force-new-cluster[Force create a new cluster from current state]" \
"($help)--listen-addr=[Listen address]:ip\:port: " && ret=0 "($help)--listen-addr=[Listen address]:ip\:port: " && ret=0
@ -1215,6 +1216,7 @@ __docker_swarm_subcommand() {
(join) (join)
_arguments $(__docker_arguments) \ _arguments $(__docker_arguments) \
$opts_help \ $opts_help \
"($help)--advertise-addr[Advertised address]:ip\:port: " \
"($help)--listen-addr=[Listen address]:ip\:port: " \ "($help)--listen-addr=[Listen address]:ip\:port: " \
"($help)--token=[Token for entry into the swarm]:secret: " \ "($help)--token=[Token for entry into the swarm]:secret: " \
"($help -):host\:port: " && ret=0 "($help -):host\:port: " && ret=0


@ -4,6 +4,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@ -73,14 +74,35 @@ var defaultSpec = types.Spec{
} }
type state struct { type state struct {
// LocalAddr is this machine's local IP or hostname, if specified.
LocalAddr string
// RemoteAddr is the address that was given to "swarm join. It is used
// to find LocalAddr if necessary.
RemoteAddr string
// ListenAddr is the address we bind to, including a port.
ListenAddr string ListenAddr string
// AdvertiseAddr is the address other nodes should connect to,
// including a port.
AdvertiseAddr string
}
// NetworkSubnetsProvider exposes functions for retrieving the subnets
// of networks managed by Docker, so they can be filtered.
type NetworkSubnetsProvider interface {
V4Subnets() []net.IPNet
V6Subnets() []net.IPNet
} }
// Config provides values for Cluster. // Config provides values for Cluster.
type Config struct { type Config struct {
Root string Root string
Name string Name string
Backend executorpkg.Backend Backend executorpkg.Backend
NetworkSubnetsProvider NetworkSubnetsProvider
// DefaultAdvertiseAddr is the default host/IP or network interface to use
// if no AdvertiseAddr value is specified.
DefaultAdvertiseAddr string
} }
// Cluster provides capabilities to participate in a cluster as a worker or a // Cluster provides capabilities to participate in a cluster as a worker or a
@ -88,13 +110,17 @@ type Config struct {
type Cluster struct { type Cluster struct {
sync.RWMutex sync.RWMutex
*node *node
root string root string
config Config config Config
configEvent chan struct{} // todo: make this array and goroutine safe configEvent chan struct{} // todo: make this array and goroutine safe
listenAddr string localAddr string
stop bool actualLocalAddr string // after resolution, not persisted
err error remoteAddr string
cancelDelay func() listenAddr string
advertiseAddr string
stop bool
err error
cancelDelay func()
} }
type node struct { type node struct {
@ -126,7 +152,7 @@ func New(config Config) (*Cluster, error) {
return nil, err return nil, err
} }
n, err := c.startNewNode(false, st.ListenAddr, "", "") n, err := c.startNewNode(false, st.LocalAddr, st.RemoteAddr, st.ListenAddr, st.AdvertiseAddr, "", "")
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -162,7 +188,12 @@ func (c *Cluster) loadState() (*state, error) {
} }
func (c *Cluster) saveState() error { func (c *Cluster) saveState() error {
dt, err := json.Marshal(state{ListenAddr: c.listenAddr}) dt, err := json.Marshal(state{
LocalAddr: c.localAddr,
RemoteAddr: c.remoteAddr,
ListenAddr: c.listenAddr,
AdvertiseAddr: c.advertiseAddr,
})
if err != nil { if err != nil {
return err return err
} }
@ -195,7 +226,7 @@ func (c *Cluster) reconnectOnFailure(n *node) {
return return
} }
var err error var err error
n, err = c.startNewNode(false, c.listenAddr, c.getRemoteAddress(), "") n, err = c.startNewNode(false, c.localAddr, c.getRemoteAddress(), c.listenAddr, c.advertiseAddr, c.getRemoteAddress(), "")
if err != nil { if err != nil {
c.err = err c.err = err
close(n.done) close(n.done)
@ -204,24 +235,55 @@ func (c *Cluster) reconnectOnFailure(n *node) {
} }
} }
func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, joinToken string) (*node, error) { func (c *Cluster) startNewNode(forceNewCluster bool, localAddr, remoteAddr, listenAddr, advertiseAddr, joinAddr, joinToken string) (*node, error) {
if err := c.config.Backend.IsSwarmCompatible(); err != nil { if err := c.config.Backend.IsSwarmCompatible(); err != nil {
return nil, err return nil, err
} }
actualLocalAddr := localAddr
if actualLocalAddr == "" {
// If localAddr was not specified, resolve it automatically
// based on the route to joinAddr. localAddr can only be left
// empty on "join".
listenHost, _, err := net.SplitHostPort(listenAddr)
if err != nil {
return nil, fmt.Errorf("could not parse listen address: %v", err)
}
listenAddrIP := net.ParseIP(listenHost)
if listenAddrIP == nil || !listenAddrIP.IsUnspecified() {
actualLocalAddr = listenHost
} else {
if remoteAddr == "" {
// Should never happen except using swarms created by
// old versions that didn't save remoteAddr.
remoteAddr = "8.8.8.8:53"
}
conn, err := net.Dial("udp", remoteAddr)
if err != nil {
return nil, fmt.Errorf("could not find local IP address: %v", err)
}
localHostPort := conn.LocalAddr().String()
actualLocalAddr, _, _ = net.SplitHostPort(localHostPort)
conn.Close()
}
}
c.node = nil c.node = nil
c.cancelDelay = nil c.cancelDelay = nil
c.stop = false c.stop = false
n, err := swarmagent.NewNode(&swarmagent.NodeConfig{ n, err := swarmagent.NewNode(&swarmagent.NodeConfig{
Hostname: c.config.Name, Hostname: c.config.Name,
ForceNewCluster: forceNewCluster, ForceNewCluster: forceNewCluster,
ListenControlAPI: filepath.Join(c.root, controlSocket), ListenControlAPI: filepath.Join(c.root, controlSocket),
ListenRemoteAPI: listenAddr, ListenRemoteAPI: listenAddr,
JoinAddr: joinAddr, AdvertiseRemoteAPI: advertiseAddr,
StateDir: c.root, JoinAddr: joinAddr,
JoinToken: joinToken, StateDir: c.root,
Executor: container.NewExecutor(c.config.Backend), JoinToken: joinToken,
HeartbeatTick: 1, Executor: container.NewExecutor(c.config.Backend),
ElectionTick: 3, HeartbeatTick: 1,
ElectionTick: 3,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
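The local-address autodetection earlier in `startNewNode` never sends any traffic: dialing UDP to the remote address (or to `8.8.8.8:53` when no remote address was recorded) only asks the kernel which source address it would use for that route. A rough shell equivalent on a Linux host, handy for predicting what a given machine will detect (command and address are illustrative, not part of this change):

```bash
# Which local source address would be used to reach the manager at 192.168.99.121?
ip route get 192.168.99.121 | grep -o 'src [0-9.]*'
```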
@ -236,8 +298,13 @@ func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, joinT
reconnectDelay: initialReconnectDelay, reconnectDelay: initialReconnectDelay,
} }
c.node = node c.node = node
c.localAddr = localAddr
c.actualLocalAddr = actualLocalAddr // not saved
c.remoteAddr = remoteAddr
c.listenAddr = listenAddr c.listenAddr = listenAddr
c.advertiseAddr = advertiseAddr
c.saveState() c.saveState()
c.config.Backend.SetClusterProvider(c) c.config.Backend.SetClusterProvider(c)
go func() { go func() {
err := n.Err(ctx) err := n.Err(ctx)
@ -301,8 +368,49 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
return "", err return "", err
} }
listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
if err != nil {
c.Unlock()
return "", err
}
advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
if err != nil {
c.Unlock()
return "", err
}
localAddr := listenHost
// If the advertise address is not one of the system's
// addresses, we also require a listen address.
listenAddrIP := net.ParseIP(listenHost)
if listenAddrIP != nil && listenAddrIP.IsUnspecified() {
advertiseIP := net.ParseIP(advertiseHost)
if advertiseIP == nil {
// not an IP
c.Unlock()
return "", errMustSpecifyListenAddr
}
systemIPs := listSystemIPs()
found := false
for _, systemIP := range systemIPs {
if systemIP.Equal(advertiseIP) {
found = true
break
}
}
if !found {
c.Unlock()
return "", errMustSpecifyListenAddr
}
localAddr = advertiseIP.String()
}
// todo: check current state existing // todo: check current state existing
n, err := c.startNewNode(req.ForceNewCluster, req.ListenAddr, "", "") n, err := c.startNewNode(req.ForceNewCluster, localAddr, "", net.JoinHostPort(listenHost, listenPort), net.JoinHostPort(advertiseHost, advertisePort), "", "")
if err != nil { if err != nil {
c.Unlock() c.Unlock()
return "", err return "", err
@ -339,8 +447,23 @@ func (c *Cluster) Join(req types.JoinRequest) error {
c.Unlock() c.Unlock()
return err return err
} }
listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
if err != nil {
c.Unlock()
return err
}
var advertiseAddr string
advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
// For joining, we don't need to provide an advertise address,
// since the remote side can detect it.
if err == nil {
advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort)
}
// todo: check current state existing // todo: check current state existing
n, err := c.startNewNode(false, req.ListenAddr, req.RemoteAddrs[0], req.JoinToken) n, err := c.startNewNode(false, "", req.RemoteAddrs[0], net.JoinHostPort(listenHost, listenPort), advertiseAddr, req.RemoteAddrs[0], req.JoinToken)
if err != nil { if err != nil {
c.Unlock() c.Unlock()
return err return err
@ -530,15 +653,22 @@ func (c *Cluster) IsAgent() bool {
return c.node != nil && c.ready return c.node != nil && c.ready
} }
// GetListenAddress returns the listening address for current manager's // GetLocalAddress returns the local address.
// consensus and dispatcher APIs. func (c *Cluster) GetLocalAddress() string {
func (c *Cluster) GetListenAddress() string {
c.RLock() c.RLock()
defer c.RUnlock() defer c.RUnlock()
if c.isActiveManager() { return c.actualLocalAddr
return c.listenAddr }
// GetAdvertiseAddress returns the remotely reachable address of this node.
func (c *Cluster) GetAdvertiseAddress() string {
c.RLock()
defer c.RUnlock()
if c.advertiseAddr != "" {
advertiseHost, _, _ := net.SplitHostPort(c.advertiseAddr)
return advertiseHost
} }
return "" return c.actualLocalAddr
} }
// GetRemoteAddress returns a known advertise address of a remote manager if // GetRemoteAddress returns a known advertise address of a remote manager if
@ -572,7 +702,10 @@ func (c *Cluster) ListenClusterEvents() <-chan struct{} {
// Info returns information about the current cluster state. // Info returns information about the current cluster state.
func (c *Cluster) Info() types.Info { func (c *Cluster) Info() types.Info {
var info types.Info info := types.Info{
NodeAddr: c.GetAdvertiseAddress(),
}
c.RLock() c.RLock()
defer c.RUnlock() defer c.RUnlock()


@ -0,0 +1,268 @@
package cluster
import (
"errors"
"fmt"
"net"
)
var (
errNoSuchInterface = errors.New("no such interface")
errMultipleIPs = errors.New("could not choose an IP address to advertise since this system has multiple addresses")
errNoIP = errors.New("could not find the system's IP address")
errMustSpecifyListenAddr = errors.New("must specify a listening address because the address to advertise is not recognized as a system address")
errBadListenAddr = errors.New("listen address must be an IP address or network interface (with optional port number)")
errBadAdvertiseAddr = errors.New("advertise address must be an IP address or network interface (with optional port number)")
errBadDefaultAdvertiseAddr = errors.New("default advertise address must be an IP address or network interface (without a port number)")
)
func resolveListenAddr(specifiedAddr string) (string, string, error) {
specifiedHost, specifiedPort, err := net.SplitHostPort(specifiedAddr)
if err != nil {
return "", "", fmt.Errorf("could not parse listen address %s", specifiedAddr)
}
// Does the host component match any of the interface names on the
// system? If so, use the address from that interface.
interfaceAddr, err := resolveInterfaceAddr(specifiedHost)
if err == nil {
return interfaceAddr.String(), specifiedPort, nil
}
if err != errNoSuchInterface {
return "", "", err
}
// If it's not an interface, it must be an IP (for now)
if net.ParseIP(specifiedHost) == nil {
return "", "", errBadListenAddr
}
return specifiedHost, specifiedPort, nil
}
func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (string, string, error) {
// Approach:
// - If an advertise address is specified, use that. Resolve the
// interface's address if an interface was specified in
// advertiseAddr. Fill in the port from listenAddrPort if necessary.
// - If DefaultAdvertiseAddr is not empty, use that with the port from
// listenAddrPort. Resolve the interface's address from
// if an interface name was specified in DefaultAdvertiseAddr.
// - Otherwise, try to autodetect the system's address. Use the port in
// listenAddrPort with this address if autodetection succeeds.
if advertiseAddr != "" {
advertiseHost, advertisePort, err := net.SplitHostPort(advertiseAddr)
if err != nil {
// Not a host:port specification
advertiseHost = advertiseAddr
advertisePort = listenAddrPort
}
// Does the host component match any of the interface names on the
// system? If so, use the address from that interface.
interfaceAddr, err := resolveInterfaceAddr(advertiseHost)
if err == nil {
return interfaceAddr.String(), advertisePort, nil
}
if err != errNoSuchInterface {
return "", "", err
}
// If it's not an interface, it must be an IP (for now)
if net.ParseIP(advertiseHost) == nil {
return "", "", errBadAdvertiseAddr
}
return advertiseHost, advertisePort, nil
}
if c.config.DefaultAdvertiseAddr != "" {
// Does the default advertise address component match any of the
// interface names on the system? If so, use the address from
// that interface.
interfaceAddr, err := resolveInterfaceAddr(c.config.DefaultAdvertiseAddr)
if err == nil {
return interfaceAddr.String(), listenAddrPort, nil
}
if err != errNoSuchInterface {
return "", "", err
}
// If it's not an interface, it must be an IP (for now)
if net.ParseIP(c.config.DefaultAdvertiseAddr) == nil {
return "", "", errBadDefaultAdvertiseAddr
}
return c.config.DefaultAdvertiseAddr, listenAddrPort, nil
}
systemAddr, err := c.resolveSystemAddr()
if err != nil {
return "", "", err
}
return systemAddr.String(), listenAddrPort, nil
}
func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) {
// Use a specific interface's IP address.
intf, err := net.InterfaceByName(specifiedInterface)
if err != nil {
return nil, errNoSuchInterface
}
addrs, err := intf.Addrs()
if err != nil {
return nil, err
}
var interfaceAddr4, interfaceAddr6 net.IP
for _, addr := range addrs {
ipAddr, ok := addr.(*net.IPNet)
if ok {
if ipAddr.IP.To4() != nil {
// IPv4
if interfaceAddr4 != nil {
return nil, fmt.Errorf("interface %s has more than one IPv4 address", specifiedInterface)
}
interfaceAddr4 = ipAddr.IP
} else {
// IPv6
if interfaceAddr6 != nil {
return nil, fmt.Errorf("interface %s has more than one IPv6 address", specifiedInterface)
}
interfaceAddr6 = ipAddr.IP
}
}
}
if interfaceAddr4 == nil && interfaceAddr6 == nil {
return nil, fmt.Errorf("interface %s has no usable IPv4 or IPv6 address", specifiedInterface)
}
// In the case that there's exactly one IPv4 address
// and exactly one IPv6 address, favor IPv4 over IPv6.
if interfaceAddr4 != nil {
return interfaceAddr4, nil
}
return interfaceAddr6, nil
}
func (c *Cluster) resolveSystemAddr() (net.IP, error) {
// Use the system's only IP address, or fail if there are
// multiple addresses to choose from.
interfaces, err := net.Interfaces()
if err != nil {
return nil, err
}
var systemAddr net.IP
// List Docker-managed subnets
v4Subnets := c.config.NetworkSubnetsProvider.V4Subnets()
v6Subnets := c.config.NetworkSubnetsProvider.V6Subnets()
ifaceLoop:
for _, intf := range interfaces {
// Skip inactive interfaces and loopback interfaces
if (intf.Flags&net.FlagUp == 0) || (intf.Flags&net.FlagLoopback) != 0 {
continue
}
addrs, err := intf.Addrs()
if err != nil {
continue
}
var interfaceAddr4, interfaceAddr6 net.IP
for _, addr := range addrs {
ipAddr, ok := addr.(*net.IPNet)
// Skip loopback and link-local addresses
if !ok || !ipAddr.IP.IsGlobalUnicast() {
continue
}
if ipAddr.IP.To4() != nil {
// IPv4
// Ignore addresses in subnets that are managed by Docker.
for _, subnet := range v4Subnets {
if subnet.Contains(ipAddr.IP) {
continue ifaceLoop
}
}
if interfaceAddr4 != nil {
return nil, errMultipleIPs
}
interfaceAddr4 = ipAddr.IP
} else {
// IPv6
// Ignore addresses in subnets that are managed by Docker.
for _, subnet := range v6Subnets {
if subnet.Contains(ipAddr.IP) {
continue ifaceLoop
}
}
if interfaceAddr6 != nil {
return nil, errMultipleIPs
}
interfaceAddr6 = ipAddr.IP
}
}
// In the case that this interface has exactly one IPv4 address
// and exactly one IPv6 address, favor IPv4 over IPv6.
if interfaceAddr4 != nil {
if systemAddr != nil {
return nil, errMultipleIPs
}
systemAddr = interfaceAddr4
} else if interfaceAddr6 != nil {
if systemAddr != nil {
return nil, errMultipleIPs
}
systemAddr = interfaceAddr6
}
}
if systemAddr == nil {
return nil, errNoIP
}
return systemAddr, nil
}
func listSystemIPs() []net.IP {
interfaces, err := net.Interfaces()
if err != nil {
return nil
}
var systemAddrs []net.IP
for _, intf := range interfaces {
addrs, err := intf.Addrs()
if err != nil {
continue
}
for _, addr := range addrs {
ipAddr, ok := addr.(*net.IPNet)
if ok {
systemAddrs = append(systemAddrs, ipAddr.IP)
}
}
}
return systemAddrs
}
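To summarize the resolution order implemented above: an explicit advertise address wins, then the daemon-wide `DefaultAdvertiseAddr`, and finally `resolveSystemAddr`, which only succeeds when the host has exactly one usable address outside Docker-managed subnets. A quick way to list the candidate addresses it would be choosing between on a Linux host (the shell command is an illustration, not part of this change):

```bash
# Global-scope, non-loopback IPv4 addresses: roughly the set resolveSystemAddr
# inspects before excluding Docker-managed subnets.
ip -o -4 addr show scope global
```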


@ -127,6 +127,13 @@ type CommonConfig struct {
// Embedded structs that allow config // Embedded structs that allow config
// deserialization without the full struct. // deserialization without the full struct.
CommonTLSOptions CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
LogConfig LogConfig
bridgeConfig // bridgeConfig holds bridge network specific configuration. bridgeConfig // bridgeConfig holds bridge network specific configuration.
registry.ServiceOptions registry.ServiceOptions
@ -167,6 +174,8 @@ func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string)
cmd.IntVar(&maxConcurrentDownloads, []string{"-max-concurrent-downloads"}, defaultMaxConcurrentDownloads, usageFn("Set the max concurrent downloads for each pull")) cmd.IntVar(&maxConcurrentDownloads, []string{"-max-concurrent-downloads"}, defaultMaxConcurrentDownloads, usageFn("Set the max concurrent downloads for each pull"))
cmd.IntVar(&maxConcurrentUploads, []string{"-max-concurrent-uploads"}, defaultMaxConcurrentUploads, usageFn("Set the max concurrent uploads for each push")) cmd.IntVar(&maxConcurrentUploads, []string{"-max-concurrent-uploads"}, defaultMaxConcurrentUploads, usageFn("Set the max concurrent uploads for each push"))
cmd.StringVar(&config.SwarmDefaultAdvertiseAddr, []string{"-swarm-default-advertise-addr"}, "", usageFn("Set default address or interface for swarm advertised address"))
config.MaxConcurrentDownloads = &maxConcurrentDownloads config.MaxConcurrentDownloads = &maxConcurrentDownloads
config.MaxConcurrentUploads = &maxConcurrentUploads config.MaxConcurrentUploads = &maxConcurrentUploads
} }


@ -38,6 +38,10 @@ func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) strin
config.attachExperimentalFlags(cmd, usageFn) config.attachExperimentalFlags(cmd, usageFn)
} }
// GetExecRoot returns the user configured Exec-root
func (config *Config) GetExecRoot() string {
return config.ExecRoot
}
func (config *Config) isSwarmCompatible() error { func (config *Config) isSwarmCompatible() error {
return nil return nil
} }


@ -124,6 +124,11 @@ func (config *Config) GetAllRuntimes() map[string]types.Runtime {
return rts return rts
} }
// GetExecRoot returns the user configured Exec-root
func (config *Config) GetExecRoot() string {
return config.ExecRoot
}
func (config *Config) isSwarmCompatible() error { func (config *Config) isSwarmCompatible() error {
if config.ClusterStore != "" || config.ClusterAdvertise != "" { if config.ClusterStore != "" || config.ClusterAdvertise != "" {
return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode")


@ -58,6 +58,11 @@ func (config *Config) GetAllRuntimes() map[string]types.Runtime {
return map[string]types.Runtime{} return map[string]types.Runtime{}
} }
// GetExecRoot returns the user configured Exec-root
func (config *Config) GetExecRoot() string {
return ""
}
func (config *Config) isSwarmCompatible() error { func (config *Config) isSwarmCompatible() error {
return nil return nil
} }


@ -728,6 +728,42 @@ func (daemon *Daemon) Unmount(container *container.Container) error {
return nil return nil
} }
// V4Subnets returns the IPv4 subnets of networks that are managed by Docker.
func (daemon *Daemon) V4Subnets() []net.IPNet {
var subnets []net.IPNet
managedNetworks := daemon.netController.Networks()
for _, managedNetwork := range managedNetworks {
v4Infos, _ := managedNetwork.Info().IpamInfo()
for _, v4Info := range v4Infos {
if v4Info.IPAMData.Pool != nil {
subnets = append(subnets, *v4Info.IPAMData.Pool)
}
}
}
return subnets
}
// V6Subnets returns the IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) V6Subnets() []net.IPNet {
var subnets []net.IPNet
managedNetworks := daemon.netController.Networks()
for _, managedNetwork := range managedNetworks {
_, v6Infos := managedNetwork.Info().IpamInfo()
for _, v6Info := range v6Infos {
if v6Info.IPAMData.Pool != nil {
subnets = append(subnets, *v6Info.IPAMData.Pool)
}
}
}
return subnets
}
func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) {
progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false)
operationCancelled := false operationCancelled := false
@ -1005,6 +1041,7 @@ func (daemon *Daemon) networkOptions(dconfig *Config, activeSandboxes map[string
} }
options = append(options, nwconfig.OptionDataDir(dconfig.Root)) options = append(options, nwconfig.OptionDataDir(dconfig.Root))
options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))
dd := runconfig.DefaultDaemonNetworkMode() dd := runconfig.DefaultDaemonNetworkMode()
dn := runconfig.DefaultDaemonNetworkMode().NetworkName() dn := runconfig.DefaultDaemonNetworkMode().NetworkName()


@ -3591,6 +3591,7 @@ Initialize a new Swarm
{ {
"ListenAddr": "0.0.0.0:4500", "ListenAddr": "0.0.0.0:4500",
"AdvertiseAddr": "192.168.1.1:4500",
"ForceNewCluster": false, "ForceNewCluster": false,
"Spec": { "Spec": {
"Orchestration": {}, "Orchestration": {},
@ -3614,8 +3615,16 @@ Initialize a new Swarm
JSON Parameters: JSON Parameters:
- **ListenAddr** Listen address used for inter-manager communication, as well as determining
the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an
address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is
used.
- **AdvertiseAddr** Externally reachable address advertised to other nodes. This can either be
an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
number, like `eth0:4567`. If the port number is omitted, the port number from the listen
address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when
possible.
- **ForceNewCluster** Force creating a new Swarm even if already part of one.
- **Spec** Configuration settings of the new Swarm.
- **Orchestration** Configuration settings for the orchestration aspects of the Swarm.
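As an illustration of where `AdvertiseAddr` fits in a request, a minimal sketch against the local engine socket; it assumes the daemon's default unix socket, curl 7.40+ for `--unix-socket`, and the `/swarm/init` endpoint path this section documents, and it simply mirrors the request body shown above:

```bash
curl -X POST --unix-socket /var/run/docker.sock \
  -H "Content-Type: application/json" \
  -d '{"ListenAddr": "0.0.0.0:4500", "AdvertiseAddr": "192.168.1.1:4500", "ForceNewCluster": false, "Spec": {"Orchestration": {}, "Raft": {}, "Dispatcher": {}, "CAConfig": {}}}' \
  http://localhost/swarm/init
```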
@ -3656,6 +3665,7 @@ Join an existing new Swarm
{ {
"ListenAddr": "0.0.0.0:4500", "ListenAddr": "0.0.0.0:4500",
"AdvertiseAddr: "192.168.1.1:4500",
"RemoteAddrs": ["node1:4500"], "RemoteAddrs": ["node1:4500"],
"JoinToken": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" "JoinToken": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
} }
@ -3676,6 +3686,11 @@ JSON Parameters:
- **ListenAddr** Listen address used for inter-manager communication if the node gets promoted to - **ListenAddr** Listen address used for inter-manager communication if the node gets promoted to
manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP).
- **AdvertiseAddr** Externally reachable address advertised to other nodes. This can either be
an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
number, like `eth0:4567`. If the port number is omitted, the port number from the listen
address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when
possible.
- **RemoteAddr** Address of any manager node already participating in the Swarm to join. - **RemoteAddr** Address of any manager node already participating in the Swarm to join.
- **JoinToken** Secret token for joining this Swarm. - **JoinToken** Secret token for joining this Swarm.


@ -3592,6 +3592,7 @@ Initialize a new Swarm
{ {
"ListenAddr": "0.0.0.0:4500", "ListenAddr": "0.0.0.0:4500",
"AdvertiseAddr": "192.168.1.1:4500",
"ForceNewCluster": false, "ForceNewCluster": false,
"Spec": { "Spec": {
"Orchestration": {}, "Orchestration": {},
@ -3615,8 +3616,16 @@ Initialize a new Swarm
JSON Parameters: JSON Parameters:
- **ListenAddr** Listen address used for inter-manager communication, as well as determining
the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an
address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is
used.
- **AdvertiseAddr** Externally reachable address advertised to other nodes. This can either be
an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
number, like `eth0:4567`. If the port number is omitted, the port number from the listen
address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when
possible.
- **ForceNewCluster** Force creating a new Swarm even if already part of one. - **ForceNewCluster** Force creating a new Swarm even if already part of one.
- **Spec** Configuration settings of the new Swarm. - **Spec** Configuration settings of the new Swarm.
- **Orchestration** Configuration settings for the orchestration aspects of the Swarm. - **Orchestration** Configuration settings for the orchestration aspects of the Swarm.
@ -3657,6 +3666,7 @@ Join an existing new Swarm
{ {
"ListenAddr": "0.0.0.0:4500", "ListenAddr": "0.0.0.0:4500",
"AdvertiseAddr": "192.168.1.1:4500",
"RemoteAddrs": ["node1:4500"], "RemoteAddrs": ["node1:4500"],
"JoinToken": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" "JoinToken": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
} }
@ -3677,6 +3687,11 @@ JSON Parameters:
- **ListenAddr** Listen address used for inter-manager communication if the node gets promoted to - **ListenAddr** Listen address used for inter-manager communication if the node gets promoted to
manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP).
- **AdvertiseAddr** Externally reachable address advertised to other nodes. This can either be
an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port
number, like `eth0:4567`. If the port number is omitted, the port number from the listen
address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when
possible.
- **RemoteAddr** Address of any manager node already participating in the Swarm to join. - **RemoteAddr** Address of any manager node already participating in the Swarm to join.
- **JoinToken** Secret token for joining this Swarm. - **JoinToken** Secret token for joining this Swarm.


@ -69,6 +69,7 @@ Options:
-s, --storage-driver Storage driver to use -s, --storage-driver Storage driver to use
--selinux-enabled Enable selinux support --selinux-enabled Enable selinux support
--storage-opt=[] Storage driver options --storage-opt=[] Storage driver options
--swarm-default-advertise-addr Set default address or interface for swarm advertised address
--tls Use TLS; implied by --tlsverify --tls Use TLS; implied by --tlsverify
--tlscacert=~/.docker/ca.pem Trust certs signed only by this CA --tlscacert=~/.docker/ca.pem Trust certs signed only by this CA
--tlscert=~/.docker/cert.pem Path to TLS certificate file --tlscert=~/.docker/cert.pem Path to TLS certificate file
@ -1042,6 +1043,7 @@ This is a full example of the allowed configuration options on Linux:
"tlscacert": "", "tlscacert": "",
"tlscert": "", "tlscert": "",
"tlskey": "", "tlskey": "",
"swarm-default-advertise-addr": "",
"api-cors-header": "", "api-cors-header": "",
"selinux-enabled": false, "selinux-enabled": false,
"userns-remap": "", "userns-remap": "",
@ -1112,6 +1114,7 @@ This is a full example of the allowed configuration options on Windows:
"tlscacert": "", "tlscacert": "",
"tlscert": "", "tlscert": "",
"tlskey": "", "tlskey": "",
"swarm-default-advertise-addr": "",
"group": "", "group": "",
"default-ulimits": {}, "default-ulimits": {},
"bridge": "", "bridge": "",


@ -17,12 +17,13 @@ Usage: docker swarm init [OPTIONS]
Initialize a swarm
Options:
--advertise-addr value Advertised address (format: <ip|interface>[:port])
--cert-expiry duration Validity period for node certificates (default 2160h0m0s)
--dispatcher-heartbeat duration Dispatcher heartbeat period (default 5s)
--external-ca value Specifications of one or more certificate signing endpoints
--force-new-cluster Force create a new cluster from current state.
--help Print usage
--listen-addr value Listen address (format: <ip|interface>[:port])
--task-history-limit int Task history retention limit (default 5)
```
@ -31,7 +32,7 @@ in the newly created one node swarm cluster.
```bash
$ docker swarm init --advertise-addr 192.168.99.121
Swarm initialized: current node (bvz81updecsj6wjz393c09vti) is now a manager.
To add a worker to this swarm, run the following command:
@ -70,11 +71,31 @@ The URL specifies the endpoint where signing requests should be submitted.
### `--force-new-cluster`

This flag forces an existing node that was part of a quorum that was lost to restart as a single node Manager without losing its data.

### `--listen-addr value`

The node listens for inbound Swarm manager traffic on this address. The default is to listen on
0.0.0.0:2377. It is also possible to specify a network interface, in which case the node listens
on that interface's address; for example `--listen-addr eth0:2377`.

Specifying a port is optional. If the value is a bare IP address or interface
name, the default port 2377 will be used.

### `--advertise-addr value`

This flag specifies the address that will be advertised to other members of the
swarm for API access and overlay networking. If unspecified, Docker will check
if the system has a single IP address, and use that IP address with the
listening port (see `--listen-addr`). If the system has multiple IP addresses,
`--advertise-addr` must be specified so that the correct address is chosen for
inter-manager communication and overlay networking.

It is also possible to specify a network interface, in which case that interface's
address is advertised; for example `--advertise-addr eth0:2377`.

Specifying a port is optional. If the value is a bare IP address or interface
name, the default port 2377 will be used.
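On a multi-homed host, leaving `--advertise-addr` unset now fails fast instead of guessing; an illustrative session (error text approximate and abridged, address reused from the example above):

```bash
$ docker swarm init
Error response from daemon: could not choose an IP address to advertise since this system has multiple addresses - specify one with --advertise-addr

$ docker swarm init --advertise-addr 192.168.99.121
Swarm initialized: current node (bvz81updecsj6wjz393c09vti) is now a manager.
```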
### `--task-history-limit`


@ -17,9 +17,10 @@ Usage: docker swarm join [OPTIONS] HOST:PORT
Join a swarm as a node and/or manager
Options:
--advertise-addr value Advertised address (format: <ip|interface>[:port])
--help Print usage
--listen-addr value Listen address (format: <ip|interface>[:port])
--token string Token for entry into the swarm
```
Join a node to a swarm. The node joins as a manager node or worker node based upon the token you
@ -31,7 +32,7 @@ pass a worker token, the node joins as a worker.
The example below demonstrates joining a manager node using a manager token.
```bash
$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 192.168.99.121:2377
This node joined a swarm as a manager.
$ docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
@ -48,7 +49,7 @@ should join as workers instead. Managers should be stable hosts that have static
The example below demonstrates joining a worker node using a worker token.
```bash
$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx 192.168.99.121:2377
This node joined a swarm as a worker.
$ docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
@ -59,7 +60,36 @@ dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader
### `--listen-addr value`

If the node is a manager, it will listen for inbound Swarm manager traffic on this
address. The default is to listen on 0.0.0.0:2377. It is also possible to specify a
network interface, in which case the node listens on that interface's address; for
example `--listen-addr eth0:2377`.

Specifying a port is optional. If the value is a bare IP address or interface
name, the default port 2377 will be used.

This flag is generally not necessary when joining an existing swarm.

### `--advertise-addr value`

This flag specifies the address that will be advertised to other members of the
swarm for API access. If unspecified, Docker will check if the system has a
single IP address, and use that IP address with the listening port (see
`--listen-addr`). If the system has multiple IP addresses, `--advertise-addr`
must be specified so that the correct address is chosen for inter-manager
communication and overlay networking.

It is also possible to specify a network interface, in which case that interface's
address is advertised; for example `--advertise-addr eth0:2377`.

Specifying a port is optional. If the value is a bare IP address or interface
name, the default port 2377 will be used.

This flag is generally not necessary when joining an existing swarm.
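When it is needed, for example on a multi-homed worker that should publish one specific address, an illustrative invocation reusing the worker token and manager address from the example above:

```bash
$ docker swarm join \
    --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \
    --advertise-addr 192.168.99.123:2377 \
    192.168.99.121:2377
This node joined a swarm as a worker.
```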
### `--token string`


@ -23,14 +23,14 @@ node. For example, the tutorial uses a machine named `manager1`.
2. Run the following command to create a new swarm:
```bash
docker swarm init --advertise-addr <MANAGER-IP>
```
In the tutorial, the following command creates a swarm on the `manager1`
machine:
```bash
$ docker swarm init --advertise-addr 192.168.99.100
Swarm initialized: current node (dxn1zf6l61qsb1josjja83ngz) is now a manager.
To add a worker to this swarm, run the following command:
@ -44,9 +44,9 @@ node. For example, the tutorial uses a machine named `manager1`.
192.168.99.100:2377
```
The `--advertise-addr` flag configures the manager node to publish its
address as `192.168.99.100`. The other nodes in the swarm must be able
to access the manager at the IP address.

The output includes the commands to join new nodes to the swarm. Nodes will
join as managers or workers depending on the value for the `--swarm-token`


@ -65,7 +65,7 @@ clone git github.com/RackSec/srslog 259aed10dfa74ea2961eddd1d9847619f6e98837
clone git github.com/imdario/mergo 0.2.1 clone git github.com/imdario/mergo 0.2.1
#get libnetwork packages #get libnetwork packages
clone git github.com/docker/libnetwork 905d374c096ca1f3a9b75529e52518b7540179f3 clone git github.com/docker/libnetwork 83ab4deaa2da3deb32cb5e64ceec43801dc17370
clone git github.com/docker/go-events afb2b9f2c23f33ada1a22b03651775fdc65a5089 clone git github.com/docker/go-events afb2b9f2c23f33ada1a22b03651775fdc65a5089
clone git github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 clone git github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@ -75,7 +75,7 @@ clone git github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa07
clone git github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 clone git github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
clone git github.com/docker/libkv v0.2.1 clone git github.com/docker/libkv v0.2.1
clone git github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 clone git github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
clone git github.com/vishvananda/netlink 734d02c3e202f682c74b71314b2c61eec0170fd4 clone git github.com/vishvananda/netlink e73bad418fd727ed3a02830b1af1ad0283a1de6c
clone git github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 clone git github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
clone git github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 clone git github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
clone git github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d clone git github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
@ -139,7 +139,7 @@ clone git github.com/docker/docker-credential-helpers v0.3.0
clone git github.com/docker/containerd 0ac3cd1be170d180b2baed755e8f0da547ceb267 clone git github.com/docker/containerd 0ac3cd1be170d180b2baed755e8f0da547ceb267
# cluster # cluster
clone git github.com/docker/swarmkit 38857c06dafcf939a56d2650d8e0011b5aace384 clone git github.com/docker/swarmkit 4d7e44321726f011d010cdb72d2230f5db2b604e
clone git github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9 clone git github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
clone git github.com/gogo/protobuf 43a2e0b1c32252bfbbdf81f7faa7a88fb3fa4028 clone git github.com/gogo/protobuf 43a2e0b1c32252bfbbdf81f7faa7a88fb3fa4028
clone git github.com/cloudflare/cfssl b895b0549c0ff676f92cf09ba971ae02bb41367b clone git github.com/cloudflare/cfssl b895b0549c0ff676f92cf09ba971ae02bb41367b


@ -211,7 +211,7 @@ func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *Swarm
port: defaultSwarmPort + s.portIndex, port: defaultSwarmPort + s.portIndex,
} }
d.listenAddr = fmt.Sprintf("0.0.0.0:%d", d.port) d.listenAddr = fmt.Sprintf("0.0.0.0:%d", d.port)
err := d.StartWithBusybox("--iptables=false") // avoid networking conflicts err := d.StartWithBusybox("--iptables=false", "--swarm-default-advertise-addr=lo") // avoid networking conflicts
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
if joinSwarm == true { if joinSwarm == true {


@ -624,11 +624,9 @@ func (s *DockerSwarmSuite) TestApiSwarmLeaveOnPendingJoin(c *check.C) {
go d2.Join(swarm.JoinRequest{ go d2.Join(swarm.JoinRequest{
RemoteAddrs: []string{"nosuchhost:1234"}, RemoteAddrs: []string{"nosuchhost:1234"},
}) // will block on pending state })
waitAndAssert(c, defaultReconciliationTimeout, d2.checkLocalNodeState, checker.Equals, swarm.LocalNodeStatePending) waitAndAssert(c, defaultReconciliationTimeout, d2.checkLocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
c.Assert(d2.Leave(true), checker.IsNil)
waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1)
@ -642,9 +640,9 @@ func (s *DockerSwarmSuite) TestApiSwarmRestoreOnPendingJoin(c *check.C) {
d := s.AddDaemon(c, false, false) d := s.AddDaemon(c, false, false)
go d.Join(swarm.JoinRequest{ go d.Join(swarm.JoinRequest{
RemoteAddrs: []string{"nosuchhost:1234"}, RemoteAddrs: []string{"nosuchhost:1234"},
}) // will block on pending state })
waitAndAssert(c, defaultReconciliationTimeout, d.checkLocalNodeState, checker.Equals, swarm.LocalNodeStatePending) waitAndAssert(c, defaultReconciliationTimeout, d.checkLocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
c.Assert(d.Stop(), checker.IsNil) c.Assert(d.Stop(), checker.IsNil)
c.Assert(d.Start(), checker.IsNil) c.Assert(d.Start(), checker.IsNil)


@ -55,6 +55,7 @@ dockerd - Enable daemon mode
[**-s**|**--storage-driver**[=*STORAGE-DRIVER*]] [**-s**|**--storage-driver**[=*STORAGE-DRIVER*]]
[**--selinux-enabled**] [**--selinux-enabled**]
[**--storage-opt**[=*[]*]] [**--storage-opt**[=*[]*]]
[**--swarm-default-advertise-addr**[=*IP|INTERFACE*]]
[**--tls**] [**--tls**]
[**--tlscacert**[=*~/.docker/ca.pem*]] [**--tlscacert**[=*~/.docker/ca.pem*]]
[**--tlscert**[=*~/.docker/cert.pem*]] [**--tlscert**[=*~/.docker/cert.pem*]]
@ -239,6 +240,11 @@ output otherwise.
**--storage-opt**=[] **--storage-opt**=[]
Set storage driver options. See STORAGE DRIVER OPTIONS. Set storage driver options. See STORAGE DRIVER OPTIONS.
**--swarm-default-advertise-addr**=*IP|INTERFACE*
Set default address or interface for swarm to advertise as its externally-reachable address to other cluster
members. This can be an IP address or a network interface name such as `eth0`. A port cannot be specified with
this option.
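For example (interface name is a placeholder), starting the daemon as below makes a later `docker swarm init` or `docker swarm join` advertise eth0's address whenever no `--advertise-addr` is given:

```bash
dockerd --swarm-default-advertise-addr eth0
```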
**--tls**=*true*|*false* **--tls**=*true*|*false*
Use TLS; implied by --tlsverify. Default is false. Use TLS; implied by --tlsverify. Default is false.


@ -35,6 +35,7 @@ func (b ByTime) Less(i, j int) bool { return b[i].LamportTime < b[j].LamportTime
type agent struct { type agent struct {
networkDB *networkdb.NetworkDB networkDB *networkdb.NetworkDB
bindAddr string bindAddr string
advertiseAddr string
epTblCancel func() epTblCancel func()
driverCancelFuncs map[string][]func() driverCancelFuncs map[string][]func()
} }
@ -236,25 +237,14 @@ func (c *controller) handleKeyChangeV1(keys []*types.EncryptionKey) error {
func (c *controller) agentSetup() error { func (c *controller) agentSetup() error {
clusterProvider := c.cfg.Daemon.ClusterProvider clusterProvider := c.cfg.Daemon.ClusterProvider
bindAddr, _, _ := net.SplitHostPort(clusterProvider.GetListenAddress()) bindAddr := clusterProvider.GetLocalAddress()
advAddr := clusterProvider.GetAdvertiseAddress()
remote := clusterProvider.GetRemoteAddress() remote := clusterProvider.GetRemoteAddress()
remoteAddr, _, _ := net.SplitHostPort(remote) remoteAddr, _, _ := net.SplitHostPort(remote)
// Determine the BindAddress from RemoteAddress or through best-effort routing logrus.Infof("Initializing Libnetwork Agent Local-addr=%s Adv-addr=%s Remote-addr =%s", bindAddr, advAddr, remoteAddr)
if !isValidClusteringIP(bindAddr) { if advAddr != "" && c.agent == nil {
if !isValidClusteringIP(remoteAddr) { if err := c.agentInit(bindAddr, advAddr); err != nil {
remote = "8.8.8.8:53"
}
conn, err := net.Dial("udp", remote)
if err == nil {
bindHostPort := conn.LocalAddr().String()
bindAddr, _, _ = net.SplitHostPort(bindHostPort)
conn.Close()
}
}
if bindAddr != "" && c.agent == nil {
if err := c.agentInit(bindAddr); err != nil {
logrus.Errorf("Error in agentInit : %v", err) logrus.Errorf("Error in agentInit : %v", err)
} else { } else {
c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool { c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool {
@ -312,7 +302,7 @@ func (c *controller) getPrimaryKeyTag(subsys string) ([]byte, uint64) {
return keys[1].Key, keys[1].LamportTime return keys[1].Key, keys[1].LamportTime
} }
func (c *controller) agentInit(bindAddrOrInterface string) error { func (c *controller) agentInit(bindAddrOrInterface, advertiseAddr string) error {
if !c.isAgent() { if !c.isAgent() {
return nil return nil
} }
@ -325,9 +315,9 @@ func (c *controller) agentInit(bindAddrOrInterface string) error {
keys, tags := c.getKeys(subsysGossip) keys, tags := c.getKeys(subsysGossip)
hostname, _ := os.Hostname() hostname, _ := os.Hostname()
nDB, err := networkdb.New(&networkdb.Config{ nDB, err := networkdb.New(&networkdb.Config{
BindAddr: bindAddr, AdvertiseAddr: advertiseAddr,
NodeName: hostname, NodeName: hostname,
Keys: keys, Keys: keys,
}) })
if err != nil { if err != nil {
@ -339,6 +329,7 @@ func (c *controller) agentInit(bindAddrOrInterface string) error {
c.agent = &agent{ c.agent = &agent{
networkDB: nDB, networkDB: nDB,
bindAddr: bindAddr, bindAddr: bindAddr,
advertiseAddr: advertiseAddr,
epTblCancel: cancel, epTblCancel: cancel,
driverCancelFuncs: make(map[string][]func()), driverCancelFuncs: make(map[string][]func()),
} }
@ -377,8 +368,9 @@ func (c *controller) agentDriverNotify(d driverapi.Driver) {
} }
d.DiscoverNew(discoverapi.NodeDiscovery, discoverapi.NodeDiscoveryData{ d.DiscoverNew(discoverapi.NodeDiscovery, discoverapi.NodeDiscoveryData{
Address: c.agent.bindAddr, Address: c.agent.advertiseAddr,
Self: true, BindAddress: c.agent.bindAddr,
Self: true,
}) })
drvEnc := discoverapi.DriverEncryptionConfig{} drvEnc := discoverapi.DriverEncryptionConfig{}


@ -4,7 +4,8 @@ package cluster
type Provider interface { type Provider interface {
IsManager() bool IsManager() bool
IsAgent() bool IsAgent() bool
GetListenAddress() string GetLocalAddress() string
GetAdvertiseAddress() string
GetRemoteAddress() string GetRemoteAddress() string
ListenClusterEvents() <-chan struct{} ListenClusterEvents() <-chan struct{}
} }


@ -11,6 +11,7 @@ import (
"github.com/docker/libnetwork/cluster" "github.com/docker/libnetwork/cluster"
"github.com/docker/libnetwork/datastore" "github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/netlabel" "github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/osl"
) )
// Config encapsulates configurations of various Libnetwork components // Config encapsulates configurations of various Libnetwork components
@ -197,6 +198,13 @@ func OptionDataDir(dataDir string) Option {
} }
} }
// OptionExecRoot function returns an option setter for exec root folder
func OptionExecRoot(execRoot string) Option {
return func(c *Config) {
osl.SetBasePath(execRoot)
}
}
// ProcessOptions processes options and stores it in config // ProcessOptions processes options and stores it in config
func (c *Config) ProcessOptions(options ...Option) { func (c *Config) ProcessOptions(options ...Option) {
for _, opt := range options { for _, opt := range options {


@ -378,6 +378,10 @@ func (c *controller) ReloadConfiguration(cfgOptions ...config.Option) error {
return nil return nil
} }
c.Lock()
c.cfg = cfg
c.Unlock()
var dsConfig *discoverapi.DatastoreConfigData var dsConfig *discoverapi.DatastoreConfigData
for scope, sCfg := range cfg.Scopes { for scope, sCfg := range cfg.Scopes {
if scope == datastore.LocalScope || !sCfg.IsValid() { if scope == datastore.LocalScope || !sCfg.IsValid() {

View file

@ -26,8 +26,9 @@ const (
// NodeDiscoveryData represents the structure backing the node discovery data json string // NodeDiscoveryData represents the structure backing the node discovery data json string
type NodeDiscoveryData struct { type NodeDiscoveryData struct {
Address string Address string
Self bool BindAddress string
Self bool
} }
// DatastoreConfigData is the data for the datastore update event message // DatastoreConfigData is the data for the datastore update event message

View file

@ -83,9 +83,9 @@ func (d *driver) populateEndpoints() error {
n, ok := d.networks[ep.nid] n, ok := d.networks[ep.nid]
if !ok { if !ok {
logrus.Debugf("Network (%s) not found for restored bridge endpoint (%s)", ep.nid[0:7], ep.id[0:7]) logrus.Debugf("Network (%s) not found for restored bridge endpoint (%s)", ep.nid[0:7], ep.id[0:7])
logrus.Debugf("Deleting stale bridge endpoint (%s) from store", ep.nid[0:7]) logrus.Debugf("Deleting stale bridge endpoint (%s) from store", ep.id[0:7])
if err := d.storeDelete(ep); err != nil { if err := d.storeDelete(ep); err != nil {
logrus.Debugf("Failed to delete stale bridge endpoint (%s) from store", ep.nid[0:7]) logrus.Debugf("Failed to delete stale bridge endpoint (%s) from store", ep.id[0:7])
} }
continue continue
} }

View file

@ -82,6 +82,6 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {
if err := d.storeDelete(ep); err != nil { if err := d.storeDelete(ep); err != nil {
logrus.Warnf("Failed to remove ipvlan endpoint %s from store: %v", ep.id[0:7], err) logrus.Warnf("Failed to remove ipvlan endpoint %s from store: %v", ep.id[0:7], err)
} }
n.deleteEndpoint(ep.id)
return nil return nil
} }

View file

@ -96,9 +96,9 @@ func (d *driver) populateEndpoints() error {
n, ok := d.networks[ep.nid] n, ok := d.networks[ep.nid]
if !ok { if !ok {
logrus.Debugf("Network (%s) not found for restored ipvlan endpoint (%s)", ep.nid[0:7], ep.id[0:7]) logrus.Debugf("Network (%s) not found for restored ipvlan endpoint (%s)", ep.nid[0:7], ep.id[0:7])
logrus.Debugf("Deleting stale ipvlan endpoint (%s) from store", ep.nid[0:7]) logrus.Debugf("Deleting stale ipvlan endpoint (%s) from store", ep.id[0:7])
if err := d.storeDelete(ep); err != nil { if err := d.storeDelete(ep); err != nil {
logrus.Debugf("Failed to delete stale ipvlan endpoint (%s) from store", ep.nid[0:7]) logrus.Debugf("Failed to delete stale ipvlan endpoint (%s) from store", ep.id[0:7])
} }
continue continue
} }

View file

@ -96,9 +96,9 @@ func (d *driver) populateEndpoints() error {
n, ok := d.networks[ep.nid] n, ok := d.networks[ep.nid]
if !ok { if !ok {
logrus.Debugf("Network (%s) not found for restored macvlan endpoint (%s)", ep.nid[0:7], ep.id[0:7]) logrus.Debugf("Network (%s) not found for restored macvlan endpoint (%s)", ep.nid[0:7], ep.id[0:7])
logrus.Debugf("Deleting stale macvlan endpoint (%s) from store", ep.nid[0:7]) logrus.Debugf("Deleting stale macvlan endpoint (%s) from store", ep.id[0:7])
if err := d.storeDelete(ep); err != nil { if err := d.storeDelete(ep); err != nil {
logrus.Debugf("Failed to delete stale macvlan endpoint (%s) from store", ep.nid[0:7]) logrus.Debugf("Failed to delete stale macvlan endpoint (%s) from store", ep.id[0:7])
} }
continue continue
} }

View file

@ -2,23 +2,27 @@ package overlay
import ( import (
"bytes" "bytes"
"encoding/binary"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"hash/fnv"
"net" "net"
"sync" "sync"
"syscall" "syscall"
"strconv"
log "github.com/Sirupsen/logrus" log "github.com/Sirupsen/logrus"
"github.com/docker/libnetwork/iptables" "github.com/docker/libnetwork/iptables"
"github.com/docker/libnetwork/ns" "github.com/docker/libnetwork/ns"
"github.com/docker/libnetwork/types" "github.com/docker/libnetwork/types"
"github.com/vishvananda/netlink" "github.com/vishvananda/netlink"
"strconv"
) )
const ( const (
mark = uint32(0xD0C4E3) mark = uint32(0xD0C4E3)
timeout = 30 timeout = 30
pktExpansion = 26 // SPI(4) + SeqN(4) + IV(8) + PadLength(1) + NextHeader(1) + ICV(8)
) )
const ( const (
@ -85,6 +89,7 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal
} }
lIP := types.GetMinimalIP(net.ParseIP(d.bindAddress)) lIP := types.GetMinimalIP(net.ParseIP(d.bindAddress))
aIP := types.GetMinimalIP(net.ParseIP(d.advertiseAddress))
nodes := map[string]net.IP{} nodes := map[string]net.IP{}
switch { switch {
@ -107,7 +112,7 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal
if add { if add {
for _, rIP := range nodes { for _, rIP := range nodes {
if err := setupEncryption(lIP, rIP, vxlanID, d.secMap, d.keys); err != nil { if err := setupEncryption(lIP, aIP, rIP, vxlanID, d.secMap, d.keys); err != nil {
log.Warnf("Failed to program network encryption between %s and %s: %v", lIP, rIP, err) log.Warnf("Failed to program network encryption between %s and %s: %v", lIP, rIP, err)
} }
} }
@ -122,7 +127,7 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal
return nil return nil
} }
func setupEncryption(localIP, remoteIP net.IP, vni uint32, em *encrMap, keys []*key) error { func setupEncryption(localIP, advIP, remoteIP net.IP, vni uint32, em *encrMap, keys []*key) error {
log.Debugf("Programming encryption for vxlan %d between %s and %s", vni, localIP, remoteIP) log.Debugf("Programming encryption for vxlan %d between %s and %s", vni, localIP, remoteIP)
rIPs := remoteIP.String() rIPs := remoteIP.String()
@ -134,7 +139,7 @@ func setupEncryption(localIP, remoteIP net.IP, vni uint32, em *encrMap, keys []*
} }
for i, k := range keys { for i, k := range keys {
spis := &spi{buildSPI(localIP, remoteIP, k.tag), buildSPI(remoteIP, localIP, k.tag)} spis := &spi{buildSPI(advIP, remoteIP, k.tag), buildSPI(remoteIP, advIP, k.tag)}
dir := reverse dir := reverse
if i == 0 { if i == 0 {
dir = bidir dir = bidir
@ -216,7 +221,6 @@ func programMangle(vni uint32, add bool) (err error) {
func programSA(localIP, remoteIP net.IP, spi *spi, k *key, dir int, add bool) (fSA *netlink.XfrmState, rSA *netlink.XfrmState, err error) { func programSA(localIP, remoteIP net.IP, spi *spi, k *key, dir int, add bool) (fSA *netlink.XfrmState, rSA *netlink.XfrmState, err error) {
var ( var (
crypt *netlink.XfrmStateAlgo
action = "Removing" action = "Removing"
xfrmProgram = ns.NlHandle().XfrmStateDel xfrmProgram = ns.NlHandle().XfrmStateDel
) )
@ -224,7 +228,6 @@ func programSA(localIP, remoteIP net.IP, spi *spi, k *key, dir int, add bool) (f
if add { if add {
action = "Adding" action = "Adding"
xfrmProgram = ns.NlHandle().XfrmStateAdd xfrmProgram = ns.NlHandle().XfrmStateAdd
crypt = &netlink.XfrmStateAlgo{Name: "cbc(aes)", Key: k.value}
} }
if dir&reverse > 0 { if dir&reverse > 0 {
@ -236,7 +239,7 @@ func programSA(localIP, remoteIP net.IP, spi *spi, k *key, dir int, add bool) (f
Mode: netlink.XFRM_MODE_TRANSPORT, Mode: netlink.XFRM_MODE_TRANSPORT,
} }
if add { if add {
rSA.Crypt = crypt rSA.Aead = buildAeadAlgo(k, spi.reverse)
} }
exists, err := saExists(rSA) exists, err := saExists(rSA)
@ -261,7 +264,7 @@ func programSA(localIP, remoteIP net.IP, spi *spi, k *key, dir int, add bool) (f
Mode: netlink.XFRM_MODE_TRANSPORT, Mode: netlink.XFRM_MODE_TRANSPORT,
} }
if add { if add {
fSA.Crypt = crypt fSA.Aead = buildAeadAlgo(k, spi.forward)
} }
exists, err := saExists(fSA) exists, err := saExists(fSA)
@ -354,13 +357,23 @@ func spExists(sp *netlink.XfrmPolicy) (bool, error) {
} }
func buildSPI(src, dst net.IP, st uint32) int { func buildSPI(src, dst net.IP, st uint32) int {
spi := int(st) b := make([]byte, 4)
f := src[len(src)-4:] binary.BigEndian.PutUint32(b, st)
t := dst[len(dst)-4:] h := fnv.New32a()
for i := 0; i < 4; i++ { h.Write(src)
spi = spi ^ (int(f[i])^int(t[3-i]))<<uint32(8*i) h.Write(b)
h.Write(dst)
return int(binary.BigEndian.Uint32(h.Sum(nil)))
}
func buildAeadAlgo(k *key, s int) *netlink.XfrmStateAlgo {
salt := make([]byte, 4)
binary.BigEndian.PutUint32(salt, uint32(s))
return &netlink.XfrmStateAlgo{
Name: "rfc4106(gcm(aes))",
Key: append(k.value, salt...),
ICVLen: 64,
} }
return spi
} }
func (d *driver) secMapWalk(f func(string, []*spi) ([]*spi, bool)) error { func (d *driver) secMapWalk(f func(string, []*spi) ([]*spi, bool)) error {
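
To make the new derivation concrete, here is a standalone sketch of the SPI and key layout: the SPI is an FNV-1a hash over source IP, key tag and destination IP, and the rfc4106(gcm(aes)) state takes the raw key with a 4-byte salt appended (the salt comes from the SPI, as buildAeadAlgo does above). The key bytes below are dummies:

    // Standalone sketch of the new SPI derivation and AES-GCM key layout.
    package example

    import (
    	"encoding/binary"
    	"fmt"
    	"hash/fnv"
    	"net"
    )

    // buildSPI mirrors the function above: FNV-1a over src || tag || dst.
    func buildSPI(src, dst net.IP, tag uint32) int {
    	b := make([]byte, 4)
    	binary.BigEndian.PutUint32(b, tag)
    	h := fnv.New32a()
    	h.Write(src)
    	h.Write(b)
    	h.Write(dst)
    	return int(binary.BigEndian.Uint32(h.Sum(nil)))
    }

    func demo() {
    	src := net.ParseIP("10.0.0.1")
    	dst := net.ParseIP("10.0.0.2")
    	spi := buildSPI(src, dst, 1)

    	// rfc4106(gcm(aes)) expects the 4-byte salt appended to the raw key.
    	key := []byte("0123456789abcdef") // dummy 128-bit key
    	salt := make([]byte, 4)
    	binary.BigEndian.PutUint32(salt, uint32(spi))
    	aeadKey := append(key, salt...)

    	fmt.Printf("spi=%#x key+salt=%d bytes\n", spi, len(aeadKey))
    }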
@ -560,3 +573,14 @@ func updateNodeKey(lIP, rIP net.IP, idxs []*spi, curKeys []*key, newIdx, priIdx,
return spis return spis
} }
func (n *network) maxMTU() int {
mtu := vxlanVethMTU
if n.secure {
// In case of encryption account for the
// ESP packet expansion and padding
mtu -= pktExpansion
mtu -= (mtu % 4)
}
return mtu
}
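
The MTU bookkeeping is easy to check by hand. Assuming vxlanVethMTU is 1450 (1500 minus the 50 bytes of VXLAN encapsulation described in the join path below), a secure network ends up at 1424:

    // Worked example of maxMTU for a secure overlay network; the constants
    // are defined locally so the arithmetic stands on its own.
    package example

    import "fmt"

    const (
    	vxlanVethMTU = 1450
    	pktExpansion = 26 // SPI(4) + SeqN(4) + IV(8) + PadLength(1) + NextHeader(1) + ICV(8)
    )

    func maxMTU(secure bool) int {
    	mtu := vxlanVethMTU
    	if secure {
    		mtu -= pktExpansion // 1450 - 26 = 1424
    		mtu -= mtu % 4      // already a multiple of 4, so still 1424
    	}
    	return mtu
    }

    func demo() {
    	fmt.Println(maxMTU(false), maxMTU(true)) // 1450 1424
    }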

View file

@ -75,11 +75,13 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
// Set the container interface and its peer MTU to 1450 to allow // Set the container interface and its peer MTU to 1450 to allow
// for 50 bytes vxlan encap (inner eth header(14) + outer IP(20) + // for 50 bytes vxlan encap (inner eth header(14) + outer IP(20) +
// outer UDP(8) + vxlan header(8)) // outer UDP(8) + vxlan header(8))
mtu := n.maxMTU()
veth, err := nlh.LinkByName(overlayIfName) veth, err := nlh.LinkByName(overlayIfName)
if err != nil { if err != nil {
return fmt.Errorf("cound not find link by name %s: %v", overlayIfName, err) return fmt.Errorf("cound not find link by name %s: %v", overlayIfName, err)
} }
err = nlh.LinkSetMTU(veth, vxlanVethMTU) err = nlh.LinkSetMTU(veth, mtu)
if err != nil { if err != nil {
return err return err
} }
@ -93,7 +95,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
if err != nil { if err != nil {
return fmt.Errorf("could not find link by name %s: %v", containerIfName, err) return fmt.Errorf("could not find link by name %s: %v", containerIfName, err)
} }
err = nlh.LinkSetMTU(veth, vxlanVethMTU) err = nlh.LinkSetMTU(veth, mtu)
if err != nil { if err != nil {
return err return err
} }
@ -119,7 +121,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
} }
d.peerDbAdd(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac, d.peerDbAdd(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac,
net.ParseIP(d.bindAddress), true) net.ParseIP(d.advertiseAddress), true)
if err := d.checkEncryption(nid, nil, n.vxlanID(s), true, true); err != nil { if err := d.checkEncryption(nid, nil, n.vxlanID(s), true, true); err != nil {
log.Warn(err) log.Warn(err)
@ -128,7 +130,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
buf, err := proto.Marshal(&PeerRecord{ buf, err := proto.Marshal(&PeerRecord{
EndpointIP: ep.addr.String(), EndpointIP: ep.addr.String(),
EndpointMAC: ep.mac.String(), EndpointMAC: ep.mac.String(),
TunnelEndpointIP: d.bindAddress, TunnelEndpointIP: d.advertiseAddress,
}) })
if err != nil { if err != nil {
return err return err
@ -159,7 +161,7 @@ func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key stri
// Ignore local peers. We already know about them and they // Ignore local peers. We already know about them and they
// should not be added to vxlan fdb. // should not be added to vxlan fdb.
if peer.TunnelEndpointIP == d.bindAddress { if peer.TunnelEndpointIP == d.advertiseAddress {
return return
} }

View file

@ -40,7 +40,7 @@ func (d *driver) serfInit() error {
config := serf.DefaultConfig() config := serf.DefaultConfig()
config.Init() config.Init()
config.MemberlistConfig.BindAddr = d.bindAddress config.MemberlistConfig.BindAddr = d.advertiseAddress
d.eventCh = make(chan serf.Event, 4) d.eventCh = make(chan serf.Event, 4)
config.EventCh = d.eventCh config.EventCh = d.eventCh

View file

@ -31,22 +31,23 @@ const (
var initVxlanIdm = make(chan (bool), 1) var initVxlanIdm = make(chan (bool), 1)
type driver struct { type driver struct {
eventCh chan serf.Event eventCh chan serf.Event
notifyCh chan ovNotify notifyCh chan ovNotify
exitCh chan chan struct{} exitCh chan chan struct{}
bindAddress string bindAddress string
neighIP string advertiseAddress string
config map[string]interface{} neighIP string
peerDb peerNetworkMap config map[string]interface{}
secMap *encrMap peerDb peerNetworkMap
serfInstance *serf.Serf secMap *encrMap
networks networkTable serfInstance *serf.Serf
store datastore.DataStore networks networkTable
localStore datastore.DataStore store datastore.DataStore
vxlanIdm *idm.Idm localStore datastore.DataStore
once sync.Once vxlanIdm *idm.Idm
joinOnce sync.Once once sync.Once
keys []*key joinOnce sync.Once
keys []*key
sync.Mutex sync.Mutex
} }
@ -111,7 +112,11 @@ func (d *driver) restoreEndpoints() error {
ep := kvo.(*endpoint) ep := kvo.(*endpoint)
n := d.network(ep.nid) n := d.network(ep.nid)
if n == nil { if n == nil {
logrus.Debugf("Network (%s) not found for restored endpoint (%s)", ep.nid, ep.id) logrus.Debugf("Network (%s) not found for restored endpoint (%s)", ep.nid[0:7], ep.id[0:7])
logrus.Debugf("Deleting stale overlay endpoint (%s) from store", ep.id[0:7])
if err := d.deleteEndpointFromStore(ep); err != nil {
logrus.Debugf("Failed to delete stale overlay endpoint (%s) from store", ep.id[0:7])
}
continue continue
} }
n.addEndpoint(ep) n.addEndpoint(ep)
@ -140,7 +145,7 @@ func (d *driver) restoreEndpoints() error {
} }
n.incEndpointCount() n.incEndpointCount()
d.peerDbAdd(ep.nid, ep.id, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.bindAddress), true) d.peerDbAdd(ep.nid, ep.id, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), true)
} }
return nil return nil
} }
@ -211,20 +216,25 @@ func validateSelf(node string) error {
return fmt.Errorf("Multi-Host overlay networking requires cluster-advertise(%s) to be configured with a local ip-address that is reachable within the cluster", advIP.String()) return fmt.Errorf("Multi-Host overlay networking requires cluster-advertise(%s) to be configured with a local ip-address that is reachable within the cluster", advIP.String())
} }
func (d *driver) nodeJoin(node string, self bool) { func (d *driver) nodeJoin(advertiseAddress, bindAddress string, self bool) {
if self && !d.isSerfAlive() { if self && !d.isSerfAlive() {
if err := validateSelf(node); err != nil {
logrus.Errorf("%s", err.Error())
}
d.Lock() d.Lock()
d.bindAddress = node d.advertiseAddress = advertiseAddress
d.bindAddress = bindAddress
d.Unlock() d.Unlock()
// If there is no cluster store there is no need to start serf. // If there is no cluster store there is no need to start serf.
if d.store != nil { if d.store != nil {
if err := validateSelf(advertiseAddress); err != nil {
logrus.Warnf("%s", err.Error())
}
err := d.serfInit() err := d.serfInit()
if err != nil { if err != nil {
logrus.Errorf("initializing serf instance failed: %v", err) logrus.Errorf("initializing serf instance failed: %v", err)
d.Lock()
d.advertiseAddress = ""
d.bindAddress = ""
d.Unlock()
return return
} }
} }
@ -232,7 +242,7 @@ func (d *driver) nodeJoin(node string, self bool) {
d.Lock() d.Lock()
if !self { if !self {
d.neighIP = node d.neighIP = advertiseAddress
} }
neighIP := d.neighIP neighIP := d.neighIP
d.Unlock() d.Unlock()
@ -246,7 +256,7 @@ func (d *driver) nodeJoin(node string, self bool) {
} }
}) })
if err != nil { if err != nil {
logrus.Errorf("joining serf neighbor %s failed: %v", node, err) logrus.Errorf("joining serf neighbor %s failed: %v", advertiseAddress, err)
d.Lock() d.Lock()
d.joinOnce = sync.Once{} d.joinOnce = sync.Once{}
d.Unlock() d.Unlock()
@ -286,7 +296,7 @@ func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{})
if !ok || nodeData.Address == "" { if !ok || nodeData.Address == "" {
return fmt.Errorf("invalid discovery data") return fmt.Errorf("invalid discovery data")
} }
d.nodeJoin(nodeData.Address, nodeData.Self) d.nodeJoin(nodeData.Address, nodeData.BindAddress, nodeData.Self)
case discoverapi.DatastoreConfig: case discoverapi.DatastoreConfig:
if d.store != nil { if d.store != nil {
return types.ForbiddenErrorf("cannot accept datastore configuration: Overlay driver has a datastore configured already") return types.ForbiddenErrorf("cannot accept datastore configuration: Overlay driver has a datastore configured already")
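
validateSelf, now called with the advertise address and demoted to a warning, is essentially an "is this IP really mine?" check. An illustrative stand-alone version of that kind of check (names here are hypothetical, not the driver's exact implementation):

    // Sketch: confirm that an advertised address is configured locally.
    package example

    import (
    	"fmt"
    	"net"
    )

    func addressIsLocal(advertise string) error {
    	advIP := net.ParseIP(advertise)
    	if advIP == nil {
    		return fmt.Errorf("%q is not an IP address", advertise)
    	}
    	addrs, err := net.InterfaceAddrs()
    	if err != nil {
    		return err
    	}
    	for _, a := range addrs {
    		if ipnet, ok := a.(*net.IPNet); ok && ipnet.IP.Equal(advIP) {
    			return nil
    		}
    	}
    	return fmt.Errorf("advertise address %s is not assigned to any local interface", advertise)
    }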

View file

@ -113,6 +113,9 @@ func (ec *endpointCnt) updateStore() error {
if store == nil { if store == nil {
return fmt.Errorf("store not found for scope %s on endpoint count update", ec.DataScope()) return fmt.Errorf("store not found for scope %s on endpoint count update", ec.DataScope())
} }
// make a copy of count and n to avoid being overwritten by store.GetObject
count := ec.EndpointCnt()
n := ec.n
for { for {
if err := ec.n.getController().updateToStore(ec); err == nil || err != datastore.ErrKeyModified { if err := ec.n.getController().updateToStore(ec); err == nil || err != datastore.ErrKeyModified {
return err return err
@ -120,6 +123,10 @@ func (ec *endpointCnt) updateStore() error {
if err := store.GetObject(datastore.Key(ec.Key()...), ec); err != nil { if err := store.GetObject(datastore.Key(ec.Key()...), ec); err != nil {
return fmt.Errorf("could not update the kvobject to latest on endpoint count update: %v", err) return fmt.Errorf("could not update the kvobject to latest on endpoint count update: %v", err)
} }
ec.Lock()
ec.Count = count
ec.n = n
ec.Unlock()
} }
} }
@ -136,7 +143,9 @@ retry:
if inc { if inc {
ec.Count++ ec.Count++
} else { } else {
ec.Count-- if ec.Count > 0 {
ec.Count--
}
} }
ec.Unlock() ec.Unlock()
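
The copy of count and n before the retry loop exists because store.GetObject refreshes the object in place on a conflict, wiping the local state that still has to be re-applied. A generic sketch of the same optimistic-update pattern, with hypothetical store names:

    // Generic optimistic-concurrency sketch; kvStore and errKeyModified are
    // stand-ins for the datastore package used above.
    package example

    import "errors"

    var errKeyModified = errors.New("key modified")

    type counter struct{ Count uint64 }

    // kvStore: Update fails with errKeyModified on a version conflict,
    // Refresh reloads obj in place from the store.
    type kvStore interface {
    	Update(key string, obj *counter) error
    	Refresh(key string, obj *counter) error
    }

    // updateWithRetry re-applies the locally computed count after every
    // Refresh, because Refresh overwrites it -- the same reason the code
    // above saves count and n before entering the loop.
    func updateWithRetry(s kvStore, key string, obj *counter, count uint64) error {
    	for {
    		obj.Count = count
    		if err := s.Update(key, obj); err == nil || err != errKeyModified {
    			return err
    		}
    		if err := s.Refresh(key, obj); err != nil {
    			return err
    		}
    	}
    }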

View file

@ -1105,9 +1105,13 @@ func (n *network) getSvcRecords(ep *endpoint) []etchosts.Record {
} }
var recs []etchosts.Record var recs []etchosts.Record
sr, _ := n.ctrlr.svcRecords[n.id]
epName := ep.Name() epName := ep.Name()
n.ctrlr.Lock()
sr, _ := n.ctrlr.svcRecords[n.id]
n.ctrlr.Unlock()
for h, ip := range sr.svcMap { for h, ip := range sr.svcMap {
if strings.Split(h, ".")[0] == epName { if strings.Split(h, ".")[0] == epName {
continue continue

View file

@ -81,7 +81,7 @@ func (nDB *NetworkDB) RemoveKey(key []byte) {
func (nDB *NetworkDB) clusterInit() error { func (nDB *NetworkDB) clusterInit() error {
config := memberlist.DefaultLANConfig() config := memberlist.DefaultLANConfig()
config.Name = nDB.config.NodeName config.Name = nDB.config.NodeName
config.BindAddr = nDB.config.BindAddr config.AdvertiseAddr = nDB.config.AdvertiseAddr
if nDB.config.BindPort != 0 { if nDB.config.BindPort != 0 {
config.BindPort = nDB.config.BindPort config.BindPort = nDB.config.BindPort

View file

@ -107,9 +107,9 @@ type Config struct {
// NodeName is the cluster wide unique name for this node. // NodeName is the cluster wide unique name for this node.
NodeName string NodeName string
// BindAddr is the local node's IP address that we bind to for // AdvertiseAddr is the node's IP address that we advertise for
// cluster communication. // cluster communication.
BindAddr string AdvertiseAddr string
// BindPort is the local node's port to which we bind to for // BindPort is the local node's port to which we bind to for
// cluster communication. // cluster communication.

View file

@ -303,6 +303,7 @@ func (n *networkNamespace) AddInterface(srcName, dstPrefix string, options ...If
for err = nlh.LinkSetUp(iface); err != nil && cnt < 3; cnt++ { for err = nlh.LinkSetUp(iface); err != nil && cnt < 3; cnt++ {
log.Debugf("retrying link setup because of: %v", err) log.Debugf("retrying link setup because of: %v", err)
time.Sleep(10 * time.Millisecond) time.Sleep(10 * time.Millisecond)
err = nlh.LinkSetUp(iface)
} }
if err != nil { if err != nil {
return fmt.Errorf("failed to set link up: %v", err) return fmt.Errorf("failed to set link up: %v", err)

View file

@ -6,6 +6,7 @@ import (
"net" "net"
"os" "os"
"os/exec" "os/exec"
"path/filepath"
"runtime" "runtime"
"strconv" "strconv"
"strings" "strings"
@ -21,7 +22,7 @@ import (
"github.com/vishvananda/netns" "github.com/vishvananda/netns"
) )
const prefix = "/var/run/docker/netns" const defaultPrefix = "/var/run/docker"
var ( var (
once sync.Once once sync.Once
@ -30,6 +31,7 @@ var (
gpmWg sync.WaitGroup gpmWg sync.WaitGroup
gpmCleanupPeriod = 60 * time.Second gpmCleanupPeriod = 60 * time.Second
gpmChan = make(chan chan struct{}) gpmChan = make(chan chan struct{})
prefix = defaultPrefix
) )
// The networkNamespace type is the linux implementation of the Sandbox // The networkNamespace type is the linux implementation of the Sandbox
@ -48,12 +50,21 @@ type networkNamespace struct {
sync.Mutex sync.Mutex
} }
// SetBasePath sets the base url prefix for the ns path
func SetBasePath(path string) {
prefix = path
}
func init() { func init() {
reexec.Register("netns-create", reexecCreateNamespace) reexec.Register("netns-create", reexecCreateNamespace)
} }
func basePath() string {
return filepath.Join(prefix, "netns")
}
func createBasePath() { func createBasePath() {
err := os.MkdirAll(prefix, 0755) err := os.MkdirAll(basePath(), 0755)
if err != nil { if err != nil {
panic("Could not create net namespace path directory") panic("Could not create net namespace path directory")
} }
@ -142,7 +153,7 @@ func GenerateKey(containerID string) string {
indexStr string indexStr string
tmpkey string tmpkey string
) )
dir, err := ioutil.ReadDir(prefix) dir, err := ioutil.ReadDir(basePath())
if err != nil { if err != nil {
return "" return ""
} }
@ -172,7 +183,7 @@ func GenerateKey(containerID string) string {
maxLen = len(containerID) maxLen = len(containerID)
} }
return prefix + "/" + containerID[:maxLen] return basePath() + "/" + containerID[:maxLen]
} }
// NewSandbox provides a new sandbox instance created in an os specific way // NewSandbox provides a new sandbox instance created in an os specific way

View file

@ -10,3 +10,7 @@ func GC() {
func GetSandboxForExternalKey(path string, key string) (Sandbox, error) { func GetSandboxForExternalKey(path string, key string) (Sandbox, error) {
return nil, nil return nil, nil
} }
// SetBasePath sets the base url prefix for the ns path
func SetBasePath(path string) {
}

View file

@ -37,3 +37,7 @@ func InitOSContext() func() {
func SetupTestOSContext(t *testing.T) func() { func SetupTestOSContext(t *testing.T) func() {
return func() {} return func() {}
} }
// SetBasePath sets the base url prefix for the ns path
func SetBasePath(path string) {
}

View file

@ -38,3 +38,7 @@ func InitOSContext() func() {
func SetupTestOSContext(t *testing.T) func() { func SetupTestOSContext(t *testing.T) func() {
return func() {} return func() {}
} }
// SetBasePath sets the base url prefix for the ns path
func SetBasePath(path string) {
}

View file

@ -413,7 +413,12 @@ func (sb *sandbox) ResolveIP(ip string) string {
for _, ep := range sb.getConnectedEndpoints() { for _, ep := range sb.getConnectedEndpoints() {
n := ep.getNetwork() n := ep.getNetwork()
sr, ok := n.getController().svcRecords[n.ID()] c := n.getController()
c.Lock()
sr, ok := c.svcRecords[n.ID()]
c.Unlock()
if !ok { if !ok {
continue continue
} }
@ -454,7 +459,12 @@ func (sb *sandbox) ResolveService(name string) ([]*net.SRV, []net.IP, error) {
for _, ep := range sb.getConnectedEndpoints() { for _, ep := range sb.getConnectedEndpoints() {
n := ep.getNetwork() n := ep.getNetwork()
sr, ok := n.getController().svcRecords[n.ID()] c := n.getController()
c.Lock()
sr, ok := c.svcRecords[n.ID()]
c.Unlock()
if !ok { if !ok {
continue continue
} }
@ -575,7 +585,11 @@ func (sb *sandbox) resolveName(req string, networkName string, epList []*endpoin
ep.Unlock() ep.Unlock()
} }
sr, ok := n.getController().svcRecords[n.ID()] c := n.getController()
c.Lock()
sr, ok := c.svcRecords[n.ID()]
c.Unlock()
if !ok { if !ok {
continue continue
} }
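
All three resolver paths now read the shared svcRecords map only while holding the controller lock. A minimal sketch of that pattern with hypothetical types (the real code goes on to iterate the record set afterwards):

    // Minimal sketch of guarding a shared map read with the owner's mutex.
    package example

    import "sync"

    type controller struct {
    	sync.Mutex
    	svcRecords map[string][]string // hypothetical value type
    }

    // recordsFor copies the slice out under the lock so callers can iterate
    // without racing concurrent writers.
    func (c *controller) recordsFor(netID string) ([]string, bool) {
    	c.Lock()
    	defer c.Unlock()
    	recs, ok := c.svcRecords[netID]
    	if !ok {
    		return nil, false
    	}
    	out := make([]string, len(recs))
    	copy(out, recs)
    	return out, true
    }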

View file

@ -15,7 +15,7 @@ import (
"github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/configs"
) )
const udsBase = "/var/lib/docker/network/files/" const udsBase = "/run/docker/libnetwork/"
const success = "success" const success = "success"
// processSetKeyReexec is a private function that must be called only on an reexec path // processSetKeyReexec is a private function that must be called only on an reexec path

View file

@ -2,10 +2,13 @@ package exec
import ( import (
"fmt" "fmt"
"reflect"
"time"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/docker/swarmkit/api" "github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log" "github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/protobuf/ptypes"
"github.com/pkg/errors" "github.com/pkg/errors"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@ -182,6 +185,10 @@ func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus,
// is completed. // is completed.
defer func() { defer func() {
logStateChange(ctx, task.DesiredState, task.Status.State, status.State) logStateChange(ctx, task.DesiredState, task.Status.State, status.State)
if !reflect.DeepEqual(status, task.Status) {
status.Timestamp = ptypes.MustTimestampProto(time.Now())
}
}() }()
// extract the container status from the container, if supported. // extract the container status from the container, if supported.
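
The new deferred block stamps the status only when it actually changed, so idle reconciliation passes do not produce spurious task updates. A compact stand-in for that guard, using a plain struct instead of api.TaskStatus:

    // Sketch: only refresh the timestamp when the status really changed.
    package example

    import (
    	"reflect"
    	"time"
    )

    type Status struct {
    	State     string
    	Timestamp time.Time
    }

    func stampIfChanged(oldStatus Status, newStatus *Status) {
    	// An unchanged status keeps its original timestamp, mirroring the
    	// deferred reflect.DeepEqual check above.
    	if !reflect.DeepEqual(*newStatus, oldStatus) {
    		newStatus.Timestamp = time.Now()
    	}
    }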

View file

@ -31,10 +31,10 @@ const stateFilename = "state.json"
// NodeConfig provides values for a Node. // NodeConfig provides values for a Node.
type NodeConfig struct { type NodeConfig struct {
// Hostname the name of host for agent instance. // Hostname is the name of host for agent instance.
Hostname string Hostname string
// JoinAddrs specifies node that should be used for the initial connection to // JoinAddr specifies node that should be used for the initial connection to
// other manager in cluster. This should be only one address and optional, // other manager in cluster. This should be only one address and optional,
// the actual remotes come from the stored state. // the actual remotes come from the stored state.
JoinAddr string JoinAddr string
@ -60,6 +60,10 @@ type NodeConfig struct {
// and raft members connect to. // and raft members connect to.
ListenRemoteAPI string ListenRemoteAPI string
// AdvertiseRemoteAPI specifies the address that should be advertised
// for connections to the remote API (including the raft service).
AdvertiseRemoteAPI string
// Executor specifies the executor to use for the agent. // Executor specifies the executor to use for the agent.
Executor exec.Executor Executor exec.Executor
@ -425,6 +429,9 @@ func (n *Node) CertificateRequested() <-chan struct{} {
func (n *Node) setControlSocket(conn *grpc.ClientConn) { func (n *Node) setControlSocket(conn *grpc.ClientConn) {
n.Lock() n.Lock()
if n.conn != nil {
n.conn.Close()
}
n.conn = conn n.conn = conn
n.connCond.Broadcast() n.connCond.Broadcast()
n.Unlock() n.Unlock()
@ -478,7 +485,7 @@ func (n *Node) NodeMembership() api.NodeSpec_Membership {
return n.nodeMembership return n.nodeMembership
} }
// Manager return manager instance started by node. May be nil. // Manager returns manager instance started by node. May be nil.
func (n *Node) Manager() *manager.Manager { func (n *Node) Manager() *manager.Manager {
n.RLock() n.RLock()
defer n.RUnlock() defer n.RUnlock()
@ -542,6 +549,8 @@ func (n *Node) initManagerConnection(ctx context.Context, ready chan<- struct{})
opts := []grpc.DialOption{} opts := []grpc.DialOption{}
insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}) insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
opts = append(opts, grpc.WithTransportCredentials(insecureCreds)) opts = append(opts, grpc.WithTransportCredentials(insecureCreds))
// Using listen address instead of advertised address because this is a
// local connection.
addr := n.config.ListenControlAPI addr := n.config.ListenControlAPI
opts = append(opts, grpc.WithDialer( opts = append(opts, grpc.WithDialer(
func(addr string, timeout time.Duration) (net.Conn, error) { func(addr string, timeout time.Duration) (net.Conn, error) {
@ -571,11 +580,11 @@ func (n *Node) initManagerConnection(ctx context.Context, ready chan<- struct{})
} }
} }
func (n *Node) waitRole(ctx context.Context, role string) error { func (n *Node) waitRole(ctx context.Context, role string) {
n.roleCond.L.Lock() n.roleCond.L.Lock()
if role == n.role { if role == n.role {
n.roleCond.L.Unlock() n.roleCond.L.Unlock()
return nil return
} }
finishCh := make(chan struct{}) finishCh := make(chan struct{})
defer close(finishCh) defer close(finishCh)
@ -591,17 +600,14 @@ func (n *Node) waitRole(ctx context.Context, role string) error {
for role != n.role { for role != n.role {
n.roleCond.Wait() n.roleCond.Wait()
if ctx.Err() != nil { if ctx.Err() != nil {
return ctx.Err() return
} }
} }
return nil
} }
func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig, ready chan struct{}) error { func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig, ready chan struct{}) error {
for { for {
if err := n.waitRole(ctx, ca.ManagerRole); err != nil { n.waitRole(ctx, ca.ManagerRole)
return err
}
if ctx.Err() != nil { if ctx.Err() != nil {
return ctx.Err() return ctx.Err()
} }
@ -612,6 +618,7 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
"tcp": n.config.ListenRemoteAPI, "tcp": n.config.ListenRemoteAPI,
"unix": n.config.ListenControlAPI, "unix": n.config.ListenControlAPI,
}, },
AdvertiseAddr: n.config.AdvertiseRemoteAPI,
SecurityConfig: securityConfig, SecurityConfig: securityConfig,
ExternalCAs: n.config.ExternalCAs, ExternalCAs: n.config.ExternalCAs,
JoinRaft: remoteAddr.Addr, JoinRaft: remoteAddr.Addr,
@ -647,25 +654,24 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
ready = nil ready = nil
} }
if err := n.waitRole(ctx, ca.AgentRole); err != nil { n.waitRole(ctx, ca.AgentRole)
m.Stop(context.Background())
} n.Lock()
n.manager = nil
n.Unlock()
select { select {
case <-done: case <-done:
case <-ctx.Done(): case <-ctx.Done():
err = ctx.Err()
m.Stop(context.Background()) m.Stop(context.Background())
return ctx.Err() <-done
} }
connCancel() connCancel()
n.Lock() if err != nil {
n.manager = nil return err
if n.conn != nil {
n.conn.Close()
} }
n.Unlock()
} }
} }
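
AdvertiseRemoteAPI complements the existing listen addresses: the server still binds to ListenRemoteAPI, but raft peers and other managers are told to dial the advertised address. A hedged configuration sketch; the values are placeholders and the import path is an assumption about where NodeConfig lives in the vendored swarmkit tree:

    // Hedged sketch of a node configured with split listen/advertise addresses.
    package example

    import swarmagent "github.com/docker/swarmkit/agent"

    func nodeConfig() *swarmagent.NodeConfig {
    	return &swarmagent.NodeConfig{
    		ListenControlAPI:   "/var/run/swarm/control.sock",
    		ListenRemoteAPI:    "0.0.0.0:2377",      // where the gRPC server binds
    		AdvertiseRemoteAPI: "198.51.100.7:2377", // what raft peers and other managers dial
    	}
    }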

View file

@ -118,10 +118,10 @@ func WalkTaskStatus(tx *bolt.Tx, fn func(id string, status *api.TaskStatus) erro
// PutTask places the task into the database. // PutTask places the task into the database.
func PutTask(tx *bolt.Tx, task *api.Task) error { func PutTask(tx *bolt.Tx, task *api.Task) error {
return withCreateTaskBucketIfNotExists(tx, task.ID, func(bkt *bolt.Bucket) error { return withCreateTaskBucketIfNotExists(tx, task.ID, func(bkt *bolt.Bucket) error {
task = task.Copy() taskCopy := *task
task.Status = api.TaskStatus{} // blank out the status. taskCopy.Status = api.TaskStatus{} // blank out the status.
p, err := proto.Marshal(task) p, err := proto.Marshal(&taskCopy)
if err != nil { if err != nil {
return err return err
} }

View file

@ -247,10 +247,11 @@ func (tm *taskManager) run(ctx context.Context) {
// //
// This used to decide whether or not to propagate a task update to a controller. // This used to decide whether or not to propagate a task update to a controller.
func tasksEqual(a, b *api.Task) bool { func tasksEqual(a, b *api.Task) bool {
a, b = a.Copy(), b.Copy() // shallow copy
copyA, copyB := *a, *b
a.Status, b.Status = api.TaskStatus{}, api.TaskStatus{} copyA.Status, copyB.Status = api.TaskStatus{}, api.TaskStatus{}
a.Meta, b.Meta = api.Meta{}, api.Meta{} copyA.Meta, copyB.Meta = api.Meta{}, api.Meta{}
return reflect.DeepEqual(a, b) return reflect.DeepEqual(&copyA, &copyB)
} }
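
This hunk and the PutTask change above share a trick: replace an expensive deep Copy() with a stack copy whose volatile fields are blanked before comparing or marshalling. A generic version of the idea, with a stand-in Task type:

    // Sketch: compare two structs while ignoring a volatile field, without a
    // deep copy. Task here is a stand-in for api.Task.
    package example

    import (
    	"reflect"
    	"time"
    )

    type Task struct {
    	ID     string
    	Spec   string
    	Status time.Time // volatile: changes on every status update
    }

    func tasksEqual(a, b *Task) bool {
    	// Shallow copies are cheap; zeroing Status on the copies leaves the
    	// originals untouched while excluding it from the comparison.
    	copyA, copyB := *a, *b
    	copyA.Status, copyB.Status = time.Time{}, time.Time{}
    	return reflect.DeepEqual(&copyA, &copyB)
    }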

View file

@ -71,6 +71,9 @@ type Service struct {
// the optional fields like node_port or virtual_ip and it // the optional fields like node_port or virtual_ip and it
// could be auto allocated by the system. // could be auto allocated by the system.
Endpoint *Endpoint `protobuf:"bytes,4,opt,name=endpoint" json:"endpoint,omitempty"` Endpoint *Endpoint `protobuf:"bytes,4,opt,name=endpoint" json:"endpoint,omitempty"`
// UpdateStatus contains the status of an update, if one is in
// progress.
UpdateStatus *UpdateStatus `protobuf:"bytes,5,opt,name=update_status,json=updateStatus" json:"update_status,omitempty"`
} }
func (m *Service) Reset() { *m = Service{} } func (m *Service) Reset() { *m = Service{} }
@ -278,10 +281,11 @@ func (m *Service) Copy() *Service {
} }
o := &Service{ o := &Service{
ID: m.ID, ID: m.ID,
Meta: *m.Meta.Copy(), Meta: *m.Meta.Copy(),
Spec: *m.Spec.Copy(), Spec: *m.Spec.Copy(),
Endpoint: m.Endpoint.Copy(), Endpoint: m.Endpoint.Copy(),
UpdateStatus: m.UpdateStatus.Copy(),
} }
return o return o
@ -464,7 +468,7 @@ func (this *Service) GoString() string {
if this == nil { if this == nil {
return "nil" return "nil"
} }
s := make([]string, 0, 8) s := make([]string, 0, 9)
s = append(s, "&api.Service{") s = append(s, "&api.Service{")
s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n")
s = append(s, "Meta: "+strings.Replace(this.Meta.GoString(), `&`, ``, 1)+",\n") s = append(s, "Meta: "+strings.Replace(this.Meta.GoString(), `&`, ``, 1)+",\n")
@ -472,6 +476,9 @@ func (this *Service) GoString() string {
if this.Endpoint != nil { if this.Endpoint != nil {
s = append(s, "Endpoint: "+fmt.Sprintf("%#v", this.Endpoint)+",\n") s = append(s, "Endpoint: "+fmt.Sprintf("%#v", this.Endpoint)+",\n")
} }
if this.UpdateStatus != nil {
s = append(s, "UpdateStatus: "+fmt.Sprintf("%#v", this.UpdateStatus)+",\n")
}
s = append(s, "}") s = append(s, "}")
return strings.Join(s, "") return strings.Join(s, "")
} }
@ -785,6 +792,16 @@ func (m *Service) MarshalTo(data []byte) (int, error) {
} }
i += n13 i += n13
} }
if m.UpdateStatus != nil {
data[i] = 0x2a
i++
i = encodeVarintObjects(data, i, uint64(m.UpdateStatus.Size()))
n14, err := m.UpdateStatus.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n14
}
return i, nil return i, nil
} }
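
The regenerated marshalling code is mechanical, but the new 0x2a key byte is worth decoding: a protobuf key is (field_number << 3) | wire_type, so field 5 with wire type 2 (length-delimited) gives (5<<3)|2 = 42 = 0x2a, and every later nN temporary simply shifts up by one. A two-line check:

    // Verifies the wire-format key used for the new UpdateStatus field.
    package example

    import "fmt"

    func wireKey() {
    	const fieldNumber, wireType = 5, 2
    	fmt.Printf("%#x\n", fieldNumber<<3|wireType) // 0x2a
    }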
@ -807,11 +824,11 @@ func (m *Endpoint) MarshalTo(data []byte) (int, error) {
data[i] = 0xa data[i] = 0xa
i++ i++
i = encodeVarintObjects(data, i, uint64(m.Spec.Size())) i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
n14, err := m.Spec.MarshalTo(data[i:]) n15, err := m.Spec.MarshalTo(data[i:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i += n14 i += n15
} }
if len(m.Ports) > 0 { if len(m.Ports) > 0 {
for _, msg := range m.Ports { for _, msg := range m.Ports {
@ -894,19 +911,19 @@ func (m *Task) MarshalTo(data []byte) (int, error) {
data[i] = 0x12 data[i] = 0x12
i++ i++
i = encodeVarintObjects(data, i, uint64(m.Meta.Size())) i = encodeVarintObjects(data, i, uint64(m.Meta.Size()))
n15, err := m.Meta.MarshalTo(data[i:]) n16, err := m.Meta.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n15
data[i] = 0x1a
i++
i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
n16, err := m.Spec.MarshalTo(data[i:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i += n16 i += n16
data[i] = 0x1a
i++
i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
n17, err := m.Spec.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n17
if len(m.ServiceID) > 0 { if len(m.ServiceID) > 0 {
data[i] = 0x22 data[i] = 0x22
i++ i++
@ -927,27 +944,27 @@ func (m *Task) MarshalTo(data []byte) (int, error) {
data[i] = 0x3a data[i] = 0x3a
i++ i++
i = encodeVarintObjects(data, i, uint64(m.Annotations.Size())) i = encodeVarintObjects(data, i, uint64(m.Annotations.Size()))
n17, err := m.Annotations.MarshalTo(data[i:]) n18, err := m.Annotations.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n17
data[i] = 0x42
i++
i = encodeVarintObjects(data, i, uint64(m.ServiceAnnotations.Size()))
n18, err := m.ServiceAnnotations.MarshalTo(data[i:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i += n18 i += n18
data[i] = 0x4a data[i] = 0x42
i++ i++
i = encodeVarintObjects(data, i, uint64(m.Status.Size())) i = encodeVarintObjects(data, i, uint64(m.ServiceAnnotations.Size()))
n19, err := m.Status.MarshalTo(data[i:]) n19, err := m.ServiceAnnotations.MarshalTo(data[i:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i += n19 i += n19
data[i] = 0x4a
i++
i = encodeVarintObjects(data, i, uint64(m.Status.Size()))
n20, err := m.Status.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n20
if m.DesiredState != 0 { if m.DesiredState != 0 {
data[i] = 0x50 data[i] = 0x50
i++ i++
@ -969,21 +986,21 @@ func (m *Task) MarshalTo(data []byte) (int, error) {
data[i] = 0x62 data[i] = 0x62
i++ i++
i = encodeVarintObjects(data, i, uint64(m.Endpoint.Size())) i = encodeVarintObjects(data, i, uint64(m.Endpoint.Size()))
n20, err := m.Endpoint.MarshalTo(data[i:]) n21, err := m.Endpoint.MarshalTo(data[i:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i += n20 i += n21
} }
if m.LogDriver != nil { if m.LogDriver != nil {
data[i] = 0x6a data[i] = 0x6a
i++ i++
i = encodeVarintObjects(data, i, uint64(m.LogDriver.Size())) i = encodeVarintObjects(data, i, uint64(m.LogDriver.Size()))
n21, err := m.LogDriver.MarshalTo(data[i:]) n22, err := m.LogDriver.MarshalTo(data[i:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i += n21 i += n22
} }
return i, nil return i, nil
} }
@ -1007,11 +1024,11 @@ func (m *NetworkAttachment) MarshalTo(data []byte) (int, error) {
data[i] = 0xa data[i] = 0xa
i++ i++
i = encodeVarintObjects(data, i, uint64(m.Network.Size())) i = encodeVarintObjects(data, i, uint64(m.Network.Size()))
n22, err := m.Network.MarshalTo(data[i:]) n23, err := m.Network.MarshalTo(data[i:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i += n22 i += n23
} }
if len(m.Addresses) > 0 { if len(m.Addresses) > 0 {
for _, s := range m.Addresses { for _, s := range m.Addresses {
@ -1070,38 +1087,38 @@ func (m *Network) MarshalTo(data []byte) (int, error) {
data[i] = 0x12 data[i] = 0x12
i++ i++
i = encodeVarintObjects(data, i, uint64(m.Meta.Size())) i = encodeVarintObjects(data, i, uint64(m.Meta.Size()))
n23, err := m.Meta.MarshalTo(data[i:]) n24, err := m.Meta.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n23
data[i] = 0x1a
i++
i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
n24, err := m.Spec.MarshalTo(data[i:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i += n24 i += n24
data[i] = 0x1a
i++
i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
n25, err := m.Spec.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n25
if m.DriverState != nil { if m.DriverState != nil {
data[i] = 0x22 data[i] = 0x22
i++ i++
i = encodeVarintObjects(data, i, uint64(m.DriverState.Size())) i = encodeVarintObjects(data, i, uint64(m.DriverState.Size()))
n25, err := m.DriverState.MarshalTo(data[i:]) n26, err := m.DriverState.MarshalTo(data[i:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i += n25 i += n26
} }
if m.IPAM != nil { if m.IPAM != nil {
data[i] = 0x2a data[i] = 0x2a
i++ i++
i = encodeVarintObjects(data, i, uint64(m.IPAM.Size())) i = encodeVarintObjects(data, i, uint64(m.IPAM.Size()))
n26, err := m.IPAM.MarshalTo(data[i:]) n27, err := m.IPAM.MarshalTo(data[i:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i += n26 i += n27
} }
return i, nil return i, nil
} }
@ -1130,27 +1147,27 @@ func (m *Cluster) MarshalTo(data []byte) (int, error) {
data[i] = 0x12 data[i] = 0x12
i++ i++
i = encodeVarintObjects(data, i, uint64(m.Meta.Size())) i = encodeVarintObjects(data, i, uint64(m.Meta.Size()))
n27, err := m.Meta.MarshalTo(data[i:]) n28, err := m.Meta.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n27
data[i] = 0x1a
i++
i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
n28, err := m.Spec.MarshalTo(data[i:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i += n28 i += n28
data[i] = 0x22 data[i] = 0x1a
i++ i++
i = encodeVarintObjects(data, i, uint64(m.RootCA.Size())) i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
n29, err := m.RootCA.MarshalTo(data[i:]) n29, err := m.Spec.MarshalTo(data[i:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i += n29 i += n29
data[i] = 0x22
i++
i = encodeVarintObjects(data, i, uint64(m.RootCA.Size()))
n30, err := m.RootCA.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n30
if len(m.NetworkBootstrapKeys) > 0 { if len(m.NetworkBootstrapKeys) > 0 {
for _, msg := range m.NetworkBootstrapKeys { for _, msg := range m.NetworkBootstrapKeys {
data[i] = 0x2a data[i] = 0x2a
@ -1260,6 +1277,10 @@ func (m *Service) Size() (n int) {
l = m.Endpoint.Size() l = m.Endpoint.Size()
n += 1 + l + sovObjects(uint64(l)) n += 1 + l + sovObjects(uint64(l))
} }
if m.UpdateStatus != nil {
l = m.UpdateStatus.Size()
n += 1 + l + sovObjects(uint64(l))
}
return n return n
} }
@ -1467,6 +1488,7 @@ func (this *Service) String() string {
`Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`,
`Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`, `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`,
`UpdateStatus:` + strings.Replace(fmt.Sprintf("%v", this.UpdateStatus), "UpdateStatus", "UpdateStatus", 1) + `,`,
`}`, `}`,
}, "") }, "")
return s return s
@ -2160,6 +2182,39 @@ func (m *Service) Unmarshal(data []byte) error {
return err return err
} }
iNdEx = postIndex iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field UpdateStatus", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowObjects
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthObjects
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.UpdateStatus == nil {
m.UpdateStatus = &UpdateStatus{}
}
if err := m.UpdateStatus.Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default: default:
iNdEx = preIndex iNdEx = preIndex
skippy, err := skipObjects(data[iNdEx:]) skippy, err := skipObjects(data[iNdEx:])
@ -3527,67 +3582,68 @@ var (
) )
var fileDescriptorObjects = []byte{ var fileDescriptorObjects = []byte{
// 981 bytes of a gzipped FileDescriptorProto // 1000 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0x1b, 0x45, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x56, 0x4d, 0x6f, 0x1b, 0x45,
0x14, 0xaf, 0xed, 0x8d, 0xed, 0x7d, 0x8e, 0x23, 0x31, 0x54, 0xd5, 0x36, 0x84, 0xa4, 0xb8, 0x02, 0x18, 0xae, 0x93, 0x8d, 0xed, 0x7d, 0x1d, 0x47, 0x62, 0xa8, 0xaa, 0x6d, 0x08, 0x49, 0x71, 0x05,
0x71, 0x40, 0xae, 0x28, 0x05, 0x81, 0xa0, 0x42, 0xb6, 0x13, 0x81, 0x05, 0x81, 0x68, 0x5a, 0x85, 0xe2, 0x80, 0x5c, 0x51, 0x0a, 0xa2, 0x82, 0x0a, 0xd9, 0x4e, 0x04, 0x16, 0x04, 0xa2, 0x69, 0x09,
0xe3, 0x6a, 0xb2, 0x3b, 0x35, 0x8b, 0xed, 0xdd, 0xd5, 0xcc, 0x24, 0x55, 0x6e, 0x88, 0x0f, 0xc0, 0xc7, 0xd5, 0x64, 0x77, 0x6a, 0x16, 0xdb, 0xbb, 0xab, 0x99, 0x71, 0xaa, 0xdc, 0x10, 0x3f, 0x00,
0x47, 0xe0, 0xab, 0x70, 0x8d, 0x10, 0x07, 0x8e, 0x9c, 0x2a, 0xda, 0x1b, 0x27, 0xf8, 0x08, 0xbc, 0x89, 0x3f, 0xc0, 0x5f, 0xe1, 0x9a, 0x03, 0x07, 0x8e, 0x9c, 0x2a, 0xda, 0x1b, 0x27, 0xf8, 0x09,
0xf9, 0xb3, 0xf6, 0x56, 0x5e, 0x87, 0x56, 0xaa, 0x72, 0x58, 0x69, 0xfe, 0xfc, 0x7e, 0xbf, 0x79, 0xbc, 0xf3, 0xb1, 0xf6, 0x46, 0x5e, 0x87, 0x56, 0xaa, 0x72, 0x58, 0x69, 0x3e, 0x9e, 0xe7, 0x99,
0xef, 0xcd, 0x7b, 0x6f, 0x07, 0xba, 0xd9, 0xc9, 0x0f, 0x3c, 0x52, 0xb2, 0x9f, 0x8b, 0x4c, 0x65, 0xf7, 0x6b, 0xde, 0x59, 0x68, 0x67, 0x27, 0x3f, 0xf0, 0x48, 0xc9, 0x6e, 0x2e, 0x32, 0x95, 0x11,
0x84, 0xc4, 0x59, 0x34, 0xe5, 0xa2, 0x2f, 0x1f, 0x33, 0x31, 0x9f, 0x26, 0xaa, 0x7f, 0xf6, 0xfe, 0x12, 0x67, 0xd1, 0x98, 0x8b, 0xae, 0x7c, 0xc2, 0xc4, 0x74, 0x9c, 0xa8, 0xee, 0xe9, 0xfb, 0xdb,
0x76, 0x47, 0x9d, 0xe7, 0xdc, 0x01, 0xb6, 0x3b, 0x32, 0xe7, 0x51, 0x31, 0xb9, 0xa9, 0x92, 0x39, 0x2d, 0x75, 0x96, 0x73, 0x07, 0xd8, 0x6e, 0xc9, 0x9c, 0x47, 0xc5, 0xe4, 0xa6, 0x4a, 0xa6, 0x5c,
0x97, 0x8a, 0xcd, 0xf3, 0x3b, 0x8b, 0x91, 0xdb, 0xba, 0x3e, 0xc9, 0x26, 0x99, 0x19, 0xde, 0xd1, 0x2a, 0x36, 0xcd, 0xef, 0xcc, 0x47, 0x6e, 0xeb, 0xfa, 0x28, 0x1b, 0x65, 0x66, 0x78, 0x47, 0x8f,
0x23, 0xbb, 0xda, 0xfb, 0xb5, 0x06, 0xde, 0x21, 0x57, 0x8c, 0x7c, 0x0a, 0xad, 0x33, 0x2e, 0x64, 0xec, 0x6a, 0xe7, 0xb7, 0x1a, 0x78, 0x87, 0x5c, 0x31, 0xf2, 0x09, 0x34, 0x4e, 0xb9, 0x90, 0x49,
0x92, 0xa5, 0x41, 0xed, 0x56, 0xed, 0xdd, 0xce, 0xdd, 0x37, 0xfa, 0xab, 0x27, 0xf7, 0x8f, 0x2d, 0x96, 0x06, 0xb5, 0x5b, 0xb5, 0x77, 0x5b, 0x77, 0xdf, 0xe8, 0x2e, 0x9f, 0xdc, 0x3d, 0xb6, 0x90,
0x64, 0xe8, 0x5d, 0x3c, 0xd9, 0xbb, 0x46, 0x0b, 0x06, 0xf9, 0x0c, 0x20, 0x12, 0x9c, 0x29, 0x1e, 0xbe, 0x77, 0xfe, 0x74, 0xef, 0x1a, 0x2d, 0x18, 0xe4, 0x53, 0x80, 0x48, 0x70, 0xa6, 0x78, 0x1c,
0x87, 0x4c, 0x05, 0x75, 0xc3, 0x7f, 0xb3, 0x8a, 0xff, 0xb0, 0x30, 0x8a, 0xfa, 0x8e, 0x30, 0x50, 0x32, 0x15, 0xac, 0x19, 0xfe, 0x9b, 0x55, 0xfc, 0x47, 0x85, 0x51, 0xd4, 0x77, 0x84, 0x9e, 0xd2,
0x9a, 0x7d, 0x9a, 0xc7, 0x05, 0xbb, 0xf1, 0x42, 0x6c, 0x47, 0x18, 0xa8, 0xde, 0xdf, 0x0d, 0xf0, 0xec, 0x59, 0x1e, 0x17, 0xec, 0xf5, 0x17, 0x62, 0x3b, 0x42, 0x4f, 0x75, 0xfe, 0x5e, 0x07, 0xef,
0xbe, 0xc9, 0x62, 0x4e, 0x6e, 0x40, 0x3d, 0x89, 0x8d, 0xf1, 0xfe, 0xb0, 0xf9, 0xec, 0xc9, 0x5e, 0xeb, 0x2c, 0xe6, 0xe4, 0x06, 0xac, 0x25, 0xb1, 0x31, 0xde, 0xef, 0xd7, 0x9f, 0x3f, 0xdd, 0x5b,
0x7d, 0xbc, 0x4f, 0x71, 0x85, 0xdc, 0x05, 0x6f, 0x8e, 0x1e, 0x3a, 0xb3, 0x82, 0x2a, 0x61, 0x1d, 0x1b, 0xee, 0x53, 0x5c, 0x21, 0x77, 0xc1, 0x9b, 0xa2, 0x87, 0xce, 0xac, 0xa0, 0x4a, 0x58, 0x47,
0x01, 0xe7, 0x93, 0xc1, 0x92, 0x8f, 0xc0, 0xd3, 0x61, 0x75, 0xc6, 0xec, 0x54, 0x71, 0xf4, 0x99, 0xc0, 0xf9, 0x64, 0xb0, 0xe4, 0x23, 0xf0, 0x74, 0x58, 0x9d, 0x31, 0x3b, 0x55, 0x1c, 0x7d, 0xe6,
0x0f, 0x10, 0x53, 0xf0, 0x34, 0x9e, 0x1c, 0x40, 0x27, 0xe6, 0x32, 0x12, 0x49, 0xae, 0x74, 0x24, 0x43, 0xc4, 0x14, 0x3c, 0x8d, 0x27, 0x07, 0xd0, 0x8a, 0xb9, 0x8c, 0x44, 0x92, 0x2b, 0x1d, 0x49,
0x3d, 0x43, 0xbf, 0xbd, 0x8e, 0xbe, 0xbf, 0x84, 0xd2, 0x32, 0x0f, 0x23, 0xd2, 0x44, 0x3f, 0xd5, 0xcf, 0xd0, 0x6f, 0xaf, 0xa2, 0xef, 0x2f, 0xa0, 0xb4, 0xcc, 0xc3, 0x88, 0xd4, 0xd1, 0x4f, 0x35,
0xa9, 0x0c, 0x36, 0x8c, 0xc2, 0xee, 0x5a, 0x03, 0x0c, 0xca, 0x99, 0xe0, 0x38, 0xe4, 0x4b, 0xd8, 0x93, 0xc1, 0x86, 0x51, 0xd8, 0x5d, 0x69, 0x80, 0x41, 0x39, 0x13, 0x1c, 0x87, 0x7c, 0x01, 0x5b,
0x9a, 0xb3, 0x94, 0x4d, 0xb8, 0x08, 0x9d, 0x4a, 0xd3, 0xa8, 0xbc, 0x55, 0xe9, 0xba, 0x45, 0x5a, 0x53, 0x96, 0xb2, 0x11, 0x17, 0xa1, 0x53, 0xa9, 0x1b, 0x95, 0xb7, 0x2a, 0x5d, 0xb7, 0x48, 0x2b,
0x21, 0xda, 0x9d, 0x97, 0xa7, 0xe8, 0x0e, 0x30, 0xa5, 0x58, 0xf4, 0xfd, 0x9c, 0xa7, 0x2a, 0x68, 0x44, 0xdb, 0xd3, 0xf2, 0x14, 0xdd, 0x01, 0xa6, 0x14, 0x8b, 0xbe, 0x9f, 0xf2, 0x54, 0x05, 0x0d,
0x19, 0x95, 0xb7, 0x2b, 0x6d, 0xe1, 0xea, 0x71, 0x26, 0xa6, 0x83, 0x05, 0x98, 0x96, 0x88, 0xe4, 0xa3, 0xf2, 0x76, 0xa5, 0x2d, 0x5c, 0x3d, 0xc9, 0xc4, 0xb8, 0x37, 0x07, 0xd3, 0x12, 0x91, 0x7c,
0x0b, 0xe8, 0x44, 0x5c, 0xa8, 0xe4, 0x51, 0x12, 0xe1, 0xa5, 0x05, 0x6d, 0xa3, 0xb3, 0x57, 0xa5, 0x0e, 0xad, 0x88, 0x0b, 0x95, 0x3c, 0x4e, 0x22, 0x4c, 0x5a, 0xd0, 0x34, 0x3a, 0x7b, 0x55, 0x3a,
0x33, 0x5a, 0xc2, 0x9c, 0x53, 0x65, 0x66, 0xef, 0xb7, 0x1a, 0xb4, 0x1e, 0x70, 0x71, 0x96, 0x44, 0x83, 0x05, 0xcc, 0x39, 0x55, 0x66, 0x76, 0x7e, 0x59, 0x83, 0xc6, 0x43, 0x2e, 0x4e, 0x93, 0xe8,
0xaf, 0xf6, 0xba, 0x3f, 0x79, 0xee, 0xba, 0x2b, 0x2d, 0x73, 0xc7, 0xae, 0xdc, 0xf8, 0xc7, 0xd0, 0xd5, 0xa6, 0xfb, 0xfe, 0x85, 0x74, 0x57, 0x5a, 0xe6, 0x8e, 0x5d, 0xca, 0xf8, 0xc7, 0xd0, 0xe4,
0xe6, 0x69, 0x9c, 0x67, 0x09, 0x06, 0xc8, 0x5b, 0x9f, 0x2d, 0x07, 0x0e, 0x43, 0x17, 0xe8, 0xde, 0x69, 0x9c, 0x67, 0x09, 0x06, 0xc8, 0x5b, 0x5d, 0x2d, 0x07, 0x0e, 0x43, 0xe7, 0x68, 0x0c, 0x6e,
0x2f, 0x75, 0x68, 0x17, 0xcb, 0xe4, 0x9e, 0xb3, 0xc0, 0xd6, 0xde, 0xad, 0xcb, 0x24, 0xb4, 0x09, 0xdb, 0x56, 0x71, 0x78, 0x21, 0xd7, 0xb7, 0xaa, 0xe8, 0xdf, 0x1a, 0xa0, 0x4b, 0xd2, 0xe6, 0xac,
0xee, 0xf0, 0x7b, 0xb0, 0x91, 0x67, 0x42, 0x49, 0x74, 0xb6, 0xb1, 0x2e, 0x4d, 0x8e, 0x10, 0x30, 0x34, 0xeb, 0xfc, 0xba, 0x06, 0xcd, 0x42, 0x9d, 0xdc, 0x73, 0x8e, 0xd4, 0x56, 0x4b, 0x15, 0x58,
0xca, 0xd2, 0x47, 0xc9, 0x84, 0x5a, 0x30, 0xf9, 0x0e, 0x3a, 0x67, 0x89, 0x50, 0xa7, 0x6c, 0x16, 0xed, 0x89, 0xf3, 0xe1, 0x1e, 0x6c, 0xe4, 0x99, 0x50, 0x12, 0x63, 0xb6, 0xbe, 0xaa, 0xda, 0x8e,
0x26, 0xb9, 0x44, 0xa7, 0x35, 0xf7, 0x9d, 0xcb, 0x8e, 0xec, 0x1f, 0x5b, 0xfc, 0xf8, 0x68, 0xb8, 0x10, 0x30, 0xc8, 0xd2, 0xc7, 0xc9, 0x88, 0x5a, 0x30, 0xf9, 0x0e, 0x5a, 0xa7, 0x89, 0x50, 0x33,
0x85, 0xa1, 0x86, 0xc5, 0x54, 0x52, 0x70, 0x52, 0xe3, 0x5c, 0x6e, 0x1f, 0x82, 0xbf, 0xd8, 0x21, 0x36, 0x09, 0x93, 0x5c, 0x62, 0xec, 0x34, 0xf7, 0x9d, 0xcb, 0x8e, 0xec, 0x1e, 0x5b, 0xfc, 0xf0,
0xef, 0x01, 0xa4, 0x36, 0x2b, 0xc2, 0xc5, 0x3d, 0x75, 0x91, 0xec, 0xbb, 0x5c, 0xc1, 0xeb, 0xf2, 0xa8, 0xbf, 0x85, 0x19, 0x83, 0xf9, 0x54, 0x52, 0x70, 0x52, 0xc3, 0x5c, 0x6e, 0x1f, 0x82, 0x3f,
0x1d, 0x60, 0x1c, 0x13, 0x02, 0x1e, 0x8b, 0x63, 0x61, 0x6e, 0xcd, 0xa7, 0x66, 0xdc, 0xfb, 0x7d, 0xdf, 0x21, 0xef, 0x01, 0xa4, 0xb6, 0xb8, 0xc2, 0x79, 0xba, 0xdb, 0x48, 0xf6, 0x5d, 0xc9, 0x61,
0x03, 0xbc, 0x87, 0x4c, 0x4e, 0xaf, 0xba, 0xb2, 0xf5, 0x99, 0x2b, 0xf7, 0x8c, 0xee, 0x48, 0x9b, 0xd6, 0x7d, 0x07, 0x18, 0xc6, 0x84, 0x80, 0xc7, 0xe2, 0x58, 0x98, 0xe4, 0xfb, 0xd4, 0x8c, 0x3b,
0x02, 0xda, 0x1d, 0x6f, 0xe9, 0x8e, 0x4b, 0x0c, 0xed, 0x8e, 0x03, 0x58, 0x77, 0xe4, 0x2c, 0x53, 0xbf, 0x6f, 0x80, 0xf7, 0x88, 0xc9, 0xf1, 0x55, 0x37, 0x08, 0x7d, 0xe6, 0x52, 0xb9, 0xa0, 0x3b,
0xa6, 0x7c, 0x3d, 0x6a, 0xc6, 0xe4, 0x36, 0xb4, 0x52, 0x2c, 0x59, 0x4d, 0x6f, 0x1a, 0x3a, 0x20, 0xd2, 0x56, 0x92, 0x76, 0xc7, 0x5b, 0xb8, 0xe3, 0xea, 0x4b, 0xbb, 0xe3, 0x00, 0xd6, 0x1d, 0x39,
0xbd, 0xa9, 0xab, 0x18, 0xb9, 0x4d, 0xbd, 0x85, 0x44, 0x2c, 0x15, 0x96, 0xa6, 0x19, 0x96, 0x1f, 0xc9, 0x94, 0xa9, 0x0c, 0x8f, 0x9a, 0x31, 0xb9, 0x0d, 0x8d, 0x14, 0x6f, 0xbe, 0xa6, 0xd7, 0x0d,
0xf6, 0x01, 0xe9, 0x4a, 0xae, 0x32, 0x21, 0x07, 0x4b, 0x58, 0x51, 0x2a, 0x25, 0x26, 0x39, 0x86, 0x1d, 0x90, 0x5e, 0xd7, 0xcd, 0x00, 0xb9, 0x75, 0xbd, 0x85, 0x44, 0xbc, 0x71, 0x2c, 0x4d, 0x33,
0xd7, 0x0b, 0x7b, 0xcb, 0x82, 0xed, 0x97, 0x11, 0x24, 0x4e, 0xa1, 0xb4, 0x53, 0x6a, 0x4d, 0xfe, 0xac, 0x10, 0x6c, 0x27, 0xd2, 0xdd, 0xdc, 0xca, 0xba, 0xee, 0x2d, 0x60, 0xc5, 0x8d, 0x2b, 0x31,
0xfa, 0xd6, 0x64, 0x22, 0x58, 0xd5, 0x9a, 0x86, 0xd0, 0xc5, 0x3e, 0x97, 0x08, 0x6c, 0xf5, 0x7a, 0xc9, 0x31, 0xbc, 0x5e, 0xd8, 0x5b, 0x16, 0x6c, 0xbe, 0x8c, 0x20, 0x71, 0x0a, 0xa5, 0x9d, 0x52,
0x85, 0x07, 0x80, 0x22, 0x5b, 0x6b, 0xba, 0xbd, 0x13, 0xe1, 0x74, 0xd3, 0x71, 0xcc, 0x8c, 0x0c, 0x87, 0xf3, 0x57, 0x77, 0x38, 0x13, 0xc1, 0xaa, 0x0e, 0xd7, 0x87, 0x36, 0xb6, 0xcb, 0x44, 0xe0,
0xa0, 0xed, 0xf2, 0x46, 0x06, 0x1d, 0x93, 0xbb, 0x2f, 0xd8, 0x92, 0x16, 0xb4, 0xe7, 0x8a, 0x76, 0x8b, 0xa1, 0x57, 0x78, 0x00, 0x28, 0xb2, 0xb5, 0xe2, 0xd1, 0x70, 0x22, 0x9c, 0x6e, 0x3a, 0x8e,
0xf3, 0x65, 0x8a, 0x16, 0x3b, 0x05, 0xcc, 0xb2, 0x49, 0x18, 0x8b, 0x04, 0xff, 0x7d, 0x41, 0xd7, 0x99, 0x91, 0x1e, 0x34, 0x5d, 0xdd, 0xc8, 0xa0, 0x65, 0x6a, 0xf7, 0x05, 0x3b, 0xdb, 0x9c, 0x76,
0x70, 0xb7, 0xab, 0xb8, 0xfb, 0x06, 0x41, 0x7d, 0x44, 0xdb, 0x61, 0xef, 0xa7, 0x1a, 0xbc, 0xb6, 0xe1, 0xee, 0x6f, 0xbe, 0xd4, 0xdd, 0xbf, 0x0f, 0x30, 0xc9, 0x46, 0x61, 0x2c, 0x12, 0x7c, 0x42,
0x62, 0x14, 0xf9, 0x10, 0xb3, 0xc2, 0x2e, 0x5e, 0xf6, 0xdf, 0x75, 0x3c, 0x5a, 0x60, 0xc9, 0x0e, 0x83, 0xb6, 0xe1, 0x6e, 0x57, 0x71, 0xf7, 0x0d, 0x82, 0xfa, 0x88, 0xb6, 0xc3, 0xce, 0x4f, 0x35,
0xf8, 0xba, 0x46, 0xb8, 0x94, 0xdc, 0x56, 0xbf, 0x4f, 0x97, 0x0b, 0x24, 0x80, 0x16, 0x9b, 0x25, 0x78, 0x6d, 0xc9, 0x28, 0xf2, 0x21, 0x56, 0x85, 0x5d, 0xbc, 0xec, 0xf9, 0x76, 0x3c, 0x5a, 0x60,
0x4c, 0xef, 0x35, 0xcc, 0x5e, 0x31, 0xed, 0xfd, 0x5c, 0x87, 0x96, 0x13, 0xbb, 0xea, 0x0e, 0xea, 0xc9, 0x0e, 0xf8, 0xfa, 0x8e, 0x70, 0x29, 0xb9, 0xbd, 0xfd, 0x3e, 0x5d, 0x2c, 0x90, 0x00, 0x1a,
0x8e, 0x5d, 0xa9, 0xac, 0xfb, 0xb0, 0x69, 0xc3, 0xe9, 0x52, 0xc2, 0xfb, 0xdf, 0xa0, 0x76, 0x2c, 0x6c, 0x92, 0x30, 0xbd, 0xb7, 0x6e, 0xf6, 0x8a, 0x69, 0xe7, 0x67, 0x6c, 0xc4, 0x4e, 0xec, 0xaa,
0xde, 0xa6, 0xc3, 0x7d, 0xf0, 0x92, 0x9c, 0xcd, 0xdd, 0x9f, 0xb2, 0xf2, 0xe4, 0xf1, 0xd1, 0xe0, 0x1b, 0xb1, 0x3b, 0x76, 0xe9, 0x66, 0x3d, 0x80, 0x4d, 0x1b, 0x4e, 0x57, 0x12, 0xde, 0xff, 0x06,
0xf0, 0xdb, 0xdc, 0x66, 0x76, 0x1b, 0x1d, 0xf5, 0xf4, 0x02, 0x35, 0xb4, 0xde, 0x3f, 0x18, 0x90, 0xb5, 0x65, 0xf1, 0xb6, 0x1c, 0x1e, 0x80, 0x97, 0xe4, 0x6c, 0xea, 0x9a, 0x70, 0xe5, 0xc9, 0xc3,
0xd1, 0xec, 0x54, 0x2a, 0x2e, 0xae, 0x3a, 0x20, 0xee, 0xd8, 0x95, 0x80, 0x8c, 0xa0, 0x25, 0xb2, 0xa3, 0xde, 0xe1, 0x37, 0xb9, 0xad, 0xec, 0x26, 0x3a, 0xea, 0xe9, 0x05, 0x6a, 0x68, 0x9d, 0x7f,
0x4c, 0x85, 0x11, 0xbb, 0x2c, 0x16, 0x14, 0x21, 0xa3, 0xc1, 0x70, 0x4b, 0x13, 0x75, 0x23, 0xb1, 0x30, 0x20, 0x83, 0xc9, 0x4c, 0x2a, 0x2e, 0xae, 0x3a, 0x20, 0xee, 0xd8, 0xa5, 0x80, 0x0c, 0xa0,
0x73, 0xda, 0xd4, 0xd4, 0x11, 0xc3, 0x26, 0x7f, 0xa3, 0x68, 0xbf, 0x27, 0xb8, 0x22, 0x95, 0x60, 0x21, 0xb2, 0x4c, 0x85, 0x11, 0xbb, 0x2c, 0x16, 0x14, 0x21, 0x83, 0x5e, 0x7f, 0x4b, 0x13, 0x75,
0x79, 0x38, 0xe5, 0xe7, 0xfa, 0x49, 0xd1, 0x58, 0xf7, 0x18, 0x38, 0x48, 0x23, 0x71, 0x6e, 0x02, 0x23, 0xb1, 0x73, 0x5a, 0xd7, 0xd4, 0x01, 0xc3, 0x26, 0x7f, 0xa3, 0x68, 0xbf, 0x27, 0xb8, 0x22,
0xf5, 0x15, 0x3f, 0xa7, 0xd7, 0x9d, 0xc0, 0xb0, 0xe0, 0xe3, 0xa2, 0x24, 0x9f, 0xc3, 0x0e, 0x5f, 0x95, 0x60, 0x79, 0x38, 0xe6, 0x67, 0xfa, 0xb5, 0x5a, 0x5f, 0xf5, 0x4f, 0x71, 0x90, 0x46, 0xe2,
0xc0, 0xb4, 0x62, 0x38, 0xc3, 0x17, 0x19, 0xfe, 0x58, 0xc2, 0x68, 0x86, 0x8a, 0xa6, 0xb7, 0x79, 0xcc, 0x04, 0xea, 0x4b, 0x7e, 0x46, 0xaf, 0x3b, 0x81, 0x7e, 0xc1, 0xc7, 0x45, 0x49, 0x3e, 0x83,
0xf4, 0x26, 0x2f, 0x4b, 0x7d, 0x6d, 0x11, 0x23, 0x0d, 0x18, 0xee, 0x5c, 0x3c, 0xdd, 0xbd, 0xf6, 0x1d, 0x3e, 0x87, 0x69, 0xc5, 0x70, 0x82, 0x3f, 0x76, 0xf8, 0xb0, 0x84, 0xd1, 0x04, 0x15, 0x4d,
0x27, 0x7e, 0xff, 0x3e, 0xdd, 0xad, 0xfd, 0xf8, 0x6c, 0xb7, 0x76, 0x81, 0xdf, 0x1f, 0xf8, 0xfd, 0x6f, 0xf3, 0xe8, 0x4d, 0x5e, 0x96, 0xfa, 0xca, 0x22, 0x06, 0x1a, 0xd0, 0xdf, 0x39, 0x7f, 0xb6,
0x85, 0xdf, 0x49, 0xd3, 0xbc, 0x4b, 0x3f, 0xf8, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x27, 0xf6, 0x7b, 0xed, 0x4f, 0xfc, 0xfe, 0x7d, 0xb6, 0x5b, 0xfb, 0xf1, 0xf9, 0x6e, 0xed, 0x1c, 0xbf, 0x3f,
0x9e, 0x07, 0x0b, 0x00, 0x00, 0xf0, 0xfb, 0x0b, 0xbf, 0x93, 0xba, 0xf9, 0xbd, 0xfd, 0xe0, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff,
0x56, 0x49, 0xe6, 0x55, 0x4e, 0x0b, 0x00, 0x00,
} }

View file

@ -62,6 +62,10 @@ message Service {
// the optional fields like node_port or virtual_ip and it // the optional fields like node_port or virtual_ip and it
// could be auto allocated by the system. // could be auto allocated by the system.
Endpoint endpoint = 4; Endpoint endpoint = 4;
// UpdateStatus contains the status of an update, if one is in
// progress.
UpdateStatus update_status = 5;
} }
// Endpoint specifies all the network parameters required to // Endpoint specifies all the network parameters required to

File diff suppressed because it is too large

View file

@ -277,6 +277,54 @@ message UpdateConfig {
// Amount of time between updates. // Amount of time between updates.
Duration delay = 2 [(gogoproto.nullable) = false]; Duration delay = 2 [(gogoproto.nullable) = false];
enum FailureAction {
PAUSE = 0;
CONTINUE = 1;
// TODO(aaronl): Add ROLLBACK as a supported failure mode.
// (#486)
}
// FailureAction is the action to take when an update fails.
// Currently, a failure is defined as a single updated task failing to
// reach the RUNNING state. In the future, there will be configuration
// to define what is treated as a failure (see #486 for a proposal).
FailureAction failure_action = 3;
}
// UpdateStatus is the status of an update in progress.
message UpdateStatus {
enum UpdateState {
UNKNOWN = 0;
UPDATING = 1;
PAUSED = 2;
COMPLETED = 3;
// TODO(aaronl): add ROLLING_BACK, ROLLED_BACK as part of
// rollback support.
}
// State is the state of this update. It indicates whether the
// update is in progress, completed, or is paused.
UpdateState state = 1;
// StartedAt is the time at which the update was started.
Timestamp started_at = 2;
// CompletedAt is the time at which the update completed.
Timestamp completed_at = 3;
// TODO(aaronl): Consider adding a timestamp showing when the most
// recent task update took place. Currently, this is nontrivial
// because each service update kicks off a replacement update, so
// updating the service object with a timestamp at every step along
// the rolling update would cause the rolling update to be constantly
// restarted.
// Message explains how the update got into its current state. For
// example, if the update is paused, it will explain what is preventing
// the update from proceeding (typically the failure of a task to start up
// when OnFailure is PAUSE).
string message = 4;
} }
// TaskState enumerates the states that a task progresses through within an // TaskState enumerates the states that a task progresses through within an

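As a reading aid, a minimal sketch (not part of this change) of how a client might interpret the new UpdateStatus field, assuming the generated Go constants follow the names used later in this diff (api.UpdateStatus_UPDATING and friends):

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

// describeUpdate is a hypothetical helper: it summarizes a service's
// UpdateStatus using the states defined in the proto above.
func describeUpdate(s *api.UpdateStatus) string {
	if s == nil {
		return "no update in progress"
	}
	switch s.State {
	case api.UpdateStatus_UPDATING:
		return "update in progress: " + s.Message
	case api.UpdateStatus_PAUSED:
		return "update paused: " + s.Message
	case api.UpdateStatus_COMPLETED:
		return "update completed: " + s.Message
	default:
		return "unknown update state"
	}
}

func main() {
	fmt.Println(describeUpdate(nil))
}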
View file

@ -122,7 +122,7 @@ func AuthorizeForwardedRoleAndOrg(ctx context.Context, authorizedRoles, forwarde
// This was a forwarded request. Authorize the forwarder, and // This was a forwarded request. Authorize the forwarder, and
// check if the forwarded role matches one of the authorized // check if the forwarded role matches one of the authorized
// roles. // roles.
forwardedID, forwardedOrg, forwardedOUs := forwardedTLSInfoFromContext(ctx) _, forwardedID, forwardedOrg, forwardedOUs := forwardedTLSInfoFromContext(ctx)
if len(forwardedOUs) == 0 || forwardedID == "" || forwardedOrg == "" { if len(forwardedOUs) == 0 || forwardedID == "" || forwardedOrg == "" {
return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request") return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request")
@ -178,6 +178,10 @@ type RemoteNodeInfo struct {
// ForwardedBy contains information for the node that forwarded this // ForwardedBy contains information for the node that forwarded this
// request. It is set to nil if the request was received directly. // request. It is set to nil if the request was received directly.
ForwardedBy *RemoteNodeInfo ForwardedBy *RemoteNodeInfo
// RemoteAddr is the address that this node is connecting to the cluster
// from.
RemoteAddr string
} }
// RemoteNode returns the node ID and role from the client's TLS certificate. // RemoteNode returns the node ID and role from the client's TLS certificate.
@ -195,18 +199,30 @@ func RemoteNode(ctx context.Context) (RemoteNodeInfo, error) {
org = certSubj.Organization[0] org = certSubj.Organization[0]
} }
peer, ok := peer.FromContext(ctx)
if !ok {
return RemoteNodeInfo{}, grpc.Errorf(codes.PermissionDenied, "Permission denied: no peer info")
}
directInfo := RemoteNodeInfo{ directInfo := RemoteNodeInfo{
Roles: certSubj.OrganizationalUnit, Roles: certSubj.OrganizationalUnit,
NodeID: certSubj.CommonName, NodeID: certSubj.CommonName,
Organization: org, Organization: org,
RemoteAddr: peer.Addr.String(),
} }
if isForwardedRequest(ctx) { if isForwardedRequest(ctx) {
cn, org, ous := forwardedTLSInfoFromContext(ctx) remoteAddr, cn, org, ous := forwardedTLSInfoFromContext(ctx)
if len(ous) == 0 || cn == "" || org == "" { if len(ous) == 0 || cn == "" || org == "" {
return RemoteNodeInfo{}, grpc.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request") return RemoteNodeInfo{}, grpc.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request")
} }
return RemoteNodeInfo{Roles: ous, NodeID: cn, Organization: org, ForwardedBy: &directInfo}, nil return RemoteNodeInfo{
Roles: ous,
NodeID: cn,
Organization: org,
ForwardedBy: &directInfo,
RemoteAddr: remoteAddr,
}, nil
} }
return directInfo, nil return directInfo, nil

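A hedged usage sketch, separate from the diff itself: how calling code might log both direct and forwarded callers through the RemoteAddr and ForwardedBy fields above. The helper name and log lines are illustrative.

package main

import (
	"log"

	"golang.org/x/net/context"

	"github.com/docker/swarmkit/ca"
)

// logCaller is a hypothetical helper built on ca.RemoteNode.
func logCaller(ctx context.Context) {
	info, err := ca.RemoteNode(ctx)
	if err != nil {
		log.Printf("could not identify caller: %v", err)
		return
	}
	if info.ForwardedBy != nil {
		// The request was relayed by another manager; RemoteAddr still
		// reflects the originating node thanks to the forwarded metadata.
		log.Printf("node %s at %s (forwarded by %s)", info.NodeID, info.RemoteAddr, info.ForwardedBy.NodeID)
		return
	}
	log.Printf("node %s at %s (direct)", info.NodeID, info.RemoteAddr)
}

func main() {}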
View file

@ -3,6 +3,7 @@ package ca
import ( import (
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
) )
const ( const (
@ -10,20 +11,24 @@ const (
certCNKey = "forwarded_cert_cn" certCNKey = "forwarded_cert_cn"
certOUKey = "forwarded_cert_ou" certOUKey = "forwarded_cert_ou"
certOrgKey = "forwarded_cert_org" certOrgKey = "forwarded_cert_org"
remoteAddrKey = "remote_addr"
) )
// forwardedTLSInfoFromContext obtains forwarded TLS CN/OU from the grpc.MD // forwardedTLSInfoFromContext obtains forwarded TLS CN/OU from the grpc.MD
// object in ctx. // object in ctx.
func forwardedTLSInfoFromContext(ctx context.Context) (string, string, []string) { func forwardedTLSInfoFromContext(ctx context.Context) (remoteAddr string, cn string, org string, ous []string) {
var cn, org string
md, _ := metadata.FromContext(ctx) md, _ := metadata.FromContext(ctx)
if len(md[remoteAddrKey]) != 0 {
remoteAddr = md[remoteAddrKey][0]
}
if len(md[certCNKey]) != 0 { if len(md[certCNKey]) != 0 {
cn = md[certCNKey][0] cn = md[certCNKey][0]
} }
if len(md[certOrgKey]) != 0 { if len(md[certOrgKey]) != 0 {
org = md[certOrgKey][0] org = md[certOrgKey][0]
} }
return cn, org, md[certOUKey] ous = md[certOUKey]
return
} }
func isForwardedRequest(ctx context.Context) bool { func isForwardedRequest(ctx context.Context) bool {
@ -54,6 +59,7 @@ func WithMetadataForwardTLSInfo(ctx context.Context) (context.Context, error) {
org = certSubj.Organization[0] org = certSubj.Organization[0]
} }
} }
// If there's no TLS cert, forward with blank TLS metadata. // If there's no TLS cert, forward with blank TLS metadata.
// Note that the presence of this blank metadata is extremely // Note that the presence of this blank metadata is extremely
// important. Without it, it would look like manager is making // important. Without it, it would look like manager is making
@ -62,6 +68,10 @@ func WithMetadataForwardTLSInfo(ctx context.Context) (context.Context, error) {
md[certCNKey] = []string{cn} md[certCNKey] = []string{cn}
md[certOrgKey] = []string{org} md[certOrgKey] = []string{org}
md[certOUKey] = ous md[certOUKey] = ous
peer, ok := peer.FromContext(ctx)
if ok {
md[remoteAddrKey] = []string{peer.Addr.String()}
}
return metadata.NewContext(ctx, md), nil return metadata.NewContext(ctx, md), nil
} }

View file

@ -16,7 +16,7 @@ import (
) )
var ( var (
// alpnProtoStr are the specified application level protocols for gRPC. // alpnProtoStr is the specified application level protocols for gRPC.
alpnProtoStr = []string{"h2"} alpnProtoStr = []string{"h2"}
) )

View file

@ -3,6 +3,7 @@ package controlapi
import ( import (
"errors" "errors"
"reflect" "reflect"
"strconv"
"github.com/docker/engine-api/types/reference" "github.com/docker/engine-api/types/reference"
"github.com/docker/swarmkit/api" "github.com/docker/swarmkit/api"
@ -144,6 +145,10 @@ func validateEndpointSpec(epSpec *api.EndpointSpec) error {
return nil return nil
} }
if len(epSpec.Ports) > 0 && epSpec.Mode == api.ResolutionModeDNSRoundRobin {
return grpc.Errorf(codes.InvalidArgument, "EndpointSpec: ports can't be used with dnsrr mode")
}
portSet := make(map[api.PortConfig]struct{}) portSet := make(map[api.PortConfig]struct{})
for _, port := range epSpec.Ports { for _, port := range epSpec.Ports {
if _, ok := portSet[*port]; ok { if _, ok := portSet[*port]; ok {
@ -175,6 +180,59 @@ func validateServiceSpec(spec *api.ServiceSpec) error {
return nil return nil
} }
// checkPortConflicts makes a best-effort check for whether the passed-in spec
// has port conflicts with existing services.
func (s *Server) checkPortConflicts(spec *api.ServiceSpec) error {
if spec.Endpoint == nil {
return nil
}
pcToString := func(pc *api.PortConfig) string {
port := strconv.FormatUint(uint64(pc.PublishedPort), 10)
return port + "/" + pc.Protocol.String()
}
reqPorts := make(map[string]bool)
for _, pc := range spec.Endpoint.Ports {
if pc.PublishedPort > 0 {
reqPorts[pcToString(pc)] = true
}
}
if len(reqPorts) == 0 {
return nil
}
var (
services []*api.Service
err error
)
s.store.View(func(tx store.ReadTx) {
services, err = store.FindServices(tx, store.All)
})
if err != nil {
return err
}
for _, service := range services {
if service.Spec.Endpoint != nil {
for _, pc := range service.Spec.Endpoint.Ports {
if reqPorts[pcToString(pc)] {
return grpc.Errorf(codes.InvalidArgument, "port '%d' is already in use by service %s", pc.PublishedPort, service.ID)
}
}
}
if service.Endpoint != nil {
for _, pc := range service.Endpoint.Ports {
if reqPorts[pcToString(pc)] {
return grpc.Errorf(codes.InvalidArgument, "port '%d' is already in use by service %s", pc.PublishedPort, service.ID)
}
}
}
}
return nil
}
// CreateService creates and return a Service based on the provided ServiceSpec. // CreateService creates and return a Service based on the provided ServiceSpec.
// - Returns `InvalidArgument` if the ServiceSpec is malformed. // - Returns `InvalidArgument` if the ServiceSpec is malformed.
// - Returns `Unimplemented` if the ServiceSpec references unimplemented features. // - Returns `Unimplemented` if the ServiceSpec references unimplemented features.
@ -185,6 +243,10 @@ func (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRe
return nil, err return nil, err
} }
if err := s.checkPortConflicts(request.Spec); err != nil {
return nil, err
}
// TODO(aluzzardi): Consider using `Name` as a primary key to handle // TODO(aluzzardi): Consider using `Name` as a primary key to handle
// duplicate creations. See #65 // duplicate creations. See #65
service := &api.Service{ service := &api.Service{
@ -239,6 +301,19 @@ func (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRe
} }
var service *api.Service var service *api.Service
s.store.View(func(tx store.ReadTx) {
service = store.GetService(tx, request.ServiceID)
})
if service == nil {
return nil, grpc.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
}
if request.Spec.Endpoint != nil && !reflect.DeepEqual(request.Spec.Endpoint, service.Spec.Endpoint) {
if err := s.checkPortConflicts(request.Spec); err != nil {
return nil, err
}
}
err := s.store.Update(func(tx store.Tx) error { err := s.store.Update(func(tx store.Tx) error {
service = store.GetService(tx, request.ServiceID) service = store.GetService(tx, request.ServiceID)
if service == nil { if service == nil {
@ -257,6 +332,10 @@ func (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRe
} }
service.Meta.Version = *request.ServiceVersion service.Meta.Version = *request.ServiceVersion
service.Spec = *request.Spec.Copy() service.Spec = *request.Spec.Copy()
// Reset update status
service.UpdateStatus = nil
return store.UpdateService(tx, service) return store.UpdateService(tx, service)
}) })
if err != nil { if err != nil {

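A standalone sketch of the conflict key used by checkPortConflicts above: a published port collides only when both the port number and the protocol match. The types and names here are illustrative, not the swarmkit API.

package main

import (
	"fmt"
	"strconv"
)

type portConfig struct {
	publishedPort uint32
	protocol      string // e.g. "TCP" or "UDP"
}

// key mirrors the pcToString helper above: "<port>/<protocol>".
func key(pc portConfig) string {
	return strconv.FormatUint(uint64(pc.publishedPort), 10) + "/" + pc.protocol
}

func main() {
	requested := map[string]bool{key(portConfig{80, "TCP"}): true}
	fmt.Println(requested[key(portConfig{80, "UDP"})]) // false: same port, different protocol
	fmt.Println(requested[key(portConfig{80, "TCP"})]) // true: conflict with an existing service
}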
View file

@ -52,7 +52,7 @@ var (
// ErrSessionInvalid returned when the session in use is no longer valid. // ErrSessionInvalid returned when the session in use is no longer valid.
// The node should re-register and start a new session. // The node should re-register and start a new session.
ErrSessionInvalid = errors.New("session invalid") ErrSessionInvalid = errors.New("session invalid")
// ErrNodeNotFound returned when the Node doesn't exists in raft. // ErrNodeNotFound returned when the Node doesn't exist in raft.
ErrNodeNotFound = errors.New("node not found") ErrNodeNotFound = errors.New("node not found")
) )

View file

@ -33,7 +33,7 @@ import (
const ( const (
// defaultTaskHistoryRetentionLimit is the number of tasks to keep. // defaultTaskHistoryRetentionLimit is the number of tasks to keep.
defaultTaskHistoryRetentionLimit = 10 defaultTaskHistoryRetentionLimit = 5
) )
// Config is used to tune the Manager. // Config is used to tune the Manager.
@ -49,6 +49,9 @@ type Config struct {
// ProtoAddr fields will be used to create listeners otherwise. // ProtoAddr fields will be used to create listeners otherwise.
ProtoListener map[string]net.Listener ProtoListener map[string]net.Listener
// AdvertiseAddr is the address advertised to other members of the cluster.
AdvertiseAddr string
// JoinRaft is an optional address of a node in an existing raft // JoinRaft is an optional address of a node in an existing raft
// cluster to join. // cluster to join.
JoinRaft string JoinRaft string
@ -120,41 +123,17 @@ func New(config *Config) (*Manager, error) {
tcpAddr := config.ProtoAddr["tcp"] tcpAddr := config.ProtoAddr["tcp"]
if config.AdvertiseAddr != "" {
tcpAddr = config.AdvertiseAddr
}
if tcpAddr == "" { if tcpAddr == "" {
return nil, errors.New("no tcp listen address or listener provided") return nil, errors.New("no tcp listen address or listener provided")
} }
listenHost, listenPort, err := net.SplitHostPort(tcpAddr)
if err == nil {
ip := net.ParseIP(listenHost)
if ip != nil && ip.IsUnspecified() {
// Find our local IP address associated with the default route.
// This may not be the appropriate address to use for internal
// cluster communications, but it seems like the best default.
// The admin can override this address if necessary.
conn, err := net.Dial("udp", "8.8.8.8:53")
if err != nil {
return nil, fmt.Errorf("could not determine local IP address: %v", err)
}
localAddr := conn.LocalAddr().String()
conn.Close()
listenHost, _, err = net.SplitHostPort(localAddr)
if err != nil {
return nil, fmt.Errorf("could not split local IP address: %v", err)
}
tcpAddr = net.JoinHostPort(listenHost, listenPort)
}
}
// TODO(stevvooe): Reported address of manager is plumbed to listen addr
// for now, may want to make this separate. This can be tricky to get right
// so we need to make it easy to override. This needs to be the address
// through which agent nodes access the manager.
dispatcherConfig.Addr = tcpAddr dispatcherConfig.Addr = tcpAddr
err = os.MkdirAll(filepath.Dir(config.ProtoAddr["unix"]), 0700) err := os.MkdirAll(filepath.Dir(config.ProtoAddr["unix"]), 0700)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create socket directory: %v", err) return nil, fmt.Errorf("failed to create socket directory: %v", err)
} }
@ -359,7 +338,7 @@ func (m *Manager) Run(parent context.Context) error {
if err != nil { if err != nil {
log.G(ctx).WithError(err).Error("failed to create allocator") log.G(ctx).WithError(err).Error("failed to create allocator")
// TODO(stevvooe): It doesn't seem correct here to fail // TODO(stevvooe): It doesn't seem correct here to fail
// creating the allocator but then use it anyways. // creating the allocator but then use it anyway.
} }
go func(keyManager *keymanager.KeyManager) { go func(keyManager *keymanager.KeyManager) {

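For orientation, a minimal sketch of how a caller might populate the split addresses, assuming ProtoAddr is the protocol-to-address map implied by config.ProtoAddr["tcp"] above and leaving every other Config field at its zero value; the socket path and IP are placeholders.

package main

import "github.com/docker/swarmkit/manager"

// newManagerConfig is a hypothetical helper; only the two address fields
// touched by this diff are shown.
func newManagerConfig() *manager.Config {
	return &manager.Config{
		ProtoAddr: map[string]string{
			"tcp":  "0.0.0.0:2377",               // listen address
			"unix": "/run/swarmkit/control.sock", // local control socket
		},
		AdvertiseAddr: "192.168.1.10:2377", // address other members dial
	}
}

func main() { _ = newManagerConfig() }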
View file

@ -62,8 +62,14 @@ func (r *ReplicatedOrchestrator) Run(ctx context.Context) error {
if err = r.initTasks(ctx, readTx); err != nil { if err = r.initTasks(ctx, readTx); err != nil {
return return
} }
err = r.initServices(readTx)
err = r.initCluster(readTx) if err = r.initServices(readTx); err != nil {
return
}
if err = r.initCluster(readTx); err != nil {
return
}
}) })
if err != nil { if err != nil {
return err return err

View file

@ -31,8 +31,13 @@ type instanceRestartInfo struct {
} }
type delayedStart struct { type delayedStart struct {
// cancel is called to cancel the delayed start.
cancel func() cancel func()
doneCh chan struct{} doneCh chan struct{}
// waiter is set to true if the next restart is waiting for this delay
// to complete.
waiter bool
} }
// RestartSupervisor initiates and manages restarts. It's responsible for // RestartSupervisor initiates and manages restarts. It's responsible for
@ -40,7 +45,7 @@ type delayedStart struct {
type RestartSupervisor struct { type RestartSupervisor struct {
mu sync.Mutex mu sync.Mutex
store *store.MemoryStore store *store.MemoryStore
delays map[string]delayedStart delays map[string]*delayedStart
history map[instanceTuple]*instanceRestartInfo history map[instanceTuple]*instanceRestartInfo
historyByService map[string]map[instanceTuple]struct{} historyByService map[string]map[instanceTuple]struct{}
taskTimeout time.Duration taskTimeout time.Duration
@ -50,18 +55,59 @@ type RestartSupervisor struct {
func NewRestartSupervisor(store *store.MemoryStore) *RestartSupervisor { func NewRestartSupervisor(store *store.MemoryStore) *RestartSupervisor {
return &RestartSupervisor{ return &RestartSupervisor{
store: store, store: store,
delays: make(map[string]delayedStart), delays: make(map[string]*delayedStart),
history: make(map[instanceTuple]*instanceRestartInfo), history: make(map[instanceTuple]*instanceRestartInfo),
historyByService: make(map[string]map[instanceTuple]struct{}), historyByService: make(map[string]map[instanceTuple]struct{}),
taskTimeout: defaultOldTaskTimeout, taskTimeout: defaultOldTaskTimeout,
} }
} }
func (r *RestartSupervisor) waitRestart(ctx context.Context, oldDelay *delayedStart, cluster *api.Cluster, taskID string) {
// Wait for the last restart delay to elapse.
select {
case <-oldDelay.doneCh:
case <-ctx.Done():
return
}
// Start the next restart
err := r.store.Update(func(tx store.Tx) error {
t := store.GetTask(tx, taskID)
if t == nil {
return nil
}
service := store.GetService(tx, t.ServiceID)
if service == nil {
return nil
}
return r.Restart(ctx, tx, cluster, service, *t)
})
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to restart task after waiting for previous restart")
}
}
// Restart initiates a new task to replace t if appropriate under the service's // Restart initiates a new task to replace t if appropriate under the service's
// restart policy. // restart policy.
func (r *RestartSupervisor) Restart(ctx context.Context, tx store.Tx, cluster *api.Cluster, service *api.Service, t api.Task) error { func (r *RestartSupervisor) Restart(ctx context.Context, tx store.Tx, cluster *api.Cluster, service *api.Service, t api.Task) error {
// TODO(aluzzardi): This function should not depend on `service`. // TODO(aluzzardi): This function should not depend on `service`.
// Is the old task still in the process of restarting? If so, wait for
// its restart delay to elapse, to avoid tight restart loops (for
// example, when the image doesn't exist).
r.mu.Lock()
oldDelay, ok := r.delays[t.ID]
if ok {
if !oldDelay.waiter {
oldDelay.waiter = true
go r.waitRestart(ctx, oldDelay, cluster, t.ID)
}
r.mu.Unlock()
return nil
}
r.mu.Unlock()
t.DesiredState = api.TaskStateShutdown t.DesiredState = api.TaskStateShutdown
err := store.UpdateTask(tx, &t) err := store.UpdateTask(tx, &t)
if err != nil { if err != nil {
@ -87,10 +133,10 @@ func (r *RestartSupervisor) Restart(ctx context.Context, tx store.Tx, cluster *a
n := store.GetNode(tx, t.NodeID) n := store.GetNode(tx, t.NodeID)
restartTask.DesiredState = api.TaskStateAccepted restartTask.DesiredState = api.TaskStateReady
var restartDelay time.Duration var restartDelay time.Duration
// Restart delay does not applied to drained nodes // Restart delay is not applied to drained nodes
if n == nil || n.Spec.Availability != api.NodeAvailabilityDrain { if n == nil || n.Spec.Availability != api.NodeAvailabilityDrain {
if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil { if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
var err error var err error
@ -254,7 +300,7 @@ func (r *RestartSupervisor) DelayStart(ctx context.Context, _ store.Tx, oldTask
<-oldDelay.doneCh <-oldDelay.doneCh
r.mu.Lock() r.mu.Lock()
} }
r.delays[newTaskID] = delayedStart{cancel: cancel, doneCh: doneCh} r.delays[newTaskID] = &delayedStart{cancel: cancel, doneCh: doneCh}
r.mu.Unlock() r.mu.Unlock()
var watch chan events.Event var watch chan events.Event

View file

@ -56,7 +56,7 @@ func (r *ReplicatedOrchestrator) initTasks(ctx context.Context, readTx store.Rea
continue continue
} }
// TODO(aluzzardi): This is shady. We should have a more generic condition. // TODO(aluzzardi): This is shady. We should have a more generic condition.
if t.DesiredState != api.TaskStateAccepted || !isReplicatedService(service) { if t.DesiredState != api.TaskStateReady || !isReplicatedService(service) {
continue continue
} }
restartDelay := defaultRestartDelay restartDelay := defaultRestartDelay
@ -80,7 +80,7 @@ func (r *ReplicatedOrchestrator) initTasks(ctx context.Context, readTx store.Rea
_ = batch.Update(func(tx store.Tx) error { _ = batch.Update(func(tx store.Tx) error {
t := store.GetTask(tx, t.ID) t := store.GetTask(tx, t.ID)
// TODO(aluzzardi): This is shady as well. We should have a more generic condition. // TODO(aluzzardi): This is shady as well. We should have a more generic condition.
if t == nil || t.DesiredState != api.TaskStateAccepted { if t == nil || t.DesiredState != api.TaskStateReady {
return nil return nil
} }
r.restarts.DelayStart(ctx, tx, nil, t.ID, restartDelay, true) r.restarts.DelayStart(ctx, tx, nil, t.ID, restartDelay, true)

View file

@ -8,6 +8,7 @@ import (
"golang.org/x/net/context" "golang.org/x/net/context"
"github.com/docker/go-events"
"github.com/docker/swarmkit/api" "github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log" "github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/manager/state" "github.com/docker/swarmkit/manager/state"
@ -43,13 +44,17 @@ func (u *UpdateSupervisor) Update(ctx context.Context, cluster *api.Cluster, ser
id := service.ID id := service.ID
if update, ok := u.updates[id]; ok { if update, ok := u.updates[id]; ok {
if !update.isServiceDirty(service) {
// There's already an update working towards this goal.
return
}
update.Cancel() update.Cancel()
} }
update := NewUpdater(u.store, u.restarts) update := NewUpdater(u.store, u.restarts, cluster, service)
u.updates[id] = update u.updates[id] = update
go func() { go func() {
update.Run(ctx, cluster, service, tasks) update.Run(ctx, tasks)
u.l.Lock() u.l.Lock()
if u.updates[id] == update { if u.updates[id] == update {
delete(u.updates, id) delete(u.updates, id)
@ -74,6 +79,9 @@ type Updater struct {
watchQueue *watch.Queue watchQueue *watch.Queue
restarts *RestartSupervisor restarts *RestartSupervisor
cluster *api.Cluster
newService *api.Service
// stopChan signals to the state machine to stop running. // stopChan signals to the state machine to stop running.
stopChan chan struct{} stopChan chan struct{}
// doneChan is closed when the state machine terminates. // doneChan is closed when the state machine terminates.
@ -81,11 +89,13 @@ type Updater struct {
} }
// NewUpdater creates a new Updater. // NewUpdater creates a new Updater.
func NewUpdater(store *store.MemoryStore, restartSupervisor *RestartSupervisor) *Updater { func NewUpdater(store *store.MemoryStore, restartSupervisor *RestartSupervisor, cluster *api.Cluster, newService *api.Service) *Updater {
return &Updater{ return &Updater{
store: store, store: store,
watchQueue: store.WatchQueue(), watchQueue: store.WatchQueue(),
restarts: restartSupervisor, restarts: restartSupervisor,
cluster: cluster.Copy(),
newService: newService.Copy(),
stopChan: make(chan struct{}), stopChan: make(chan struct{}),
doneChan: make(chan struct{}), doneChan: make(chan struct{}),
} }
@ -98,22 +108,35 @@ func (u *Updater) Cancel() {
} }
// Run starts the update and returns only once its complete or cancelled. // Run starts the update and returns only once its complete or cancelled.
func (u *Updater) Run(ctx context.Context, cluster *api.Cluster, service *api.Service, tasks []*api.Task) { func (u *Updater) Run(ctx context.Context, tasks []*api.Task) {
defer close(u.doneChan) defer close(u.doneChan)
service := u.newService
// If the update is in a PAUSED state, we should not do anything.
if service.UpdateStatus != nil && service.UpdateStatus.State == api.UpdateStatus_PAUSED {
return
}
dirtyTasks := []*api.Task{} dirtyTasks := []*api.Task{}
for _, t := range tasks { for _, t := range tasks {
if !reflect.DeepEqual(service.Spec.Task, t.Spec) || if u.isTaskDirty(t) {
(t.Endpoint != nil &&
!reflect.DeepEqual(service.Spec.Endpoint, t.Endpoint.Spec)) {
dirtyTasks = append(dirtyTasks, t) dirtyTasks = append(dirtyTasks, t)
} }
} }
// Abort immediately if all tasks are clean. // Abort immediately if all tasks are clean.
if len(dirtyTasks) == 0 { if len(dirtyTasks) == 0 {
if service.UpdateStatus != nil && service.UpdateStatus.State == api.UpdateStatus_UPDATING {
u.completeUpdate(ctx, service.ID)
}
return return
} }
// If there's no update in progress, we are starting one.
if service.UpdateStatus == nil {
u.startUpdate(ctx, service.ID)
}
parallelism := 0 parallelism := 0
if service.Spec.Update != nil { if service.Spec.Update != nil {
parallelism = int(service.Spec.Update.Parallelism) parallelism = int(service.Spec.Update.Parallelism)
@ -130,39 +153,76 @@ func (u *Updater) Run(ctx context.Context, cluster *api.Cluster, service *api.Se
wg.Add(parallelism) wg.Add(parallelism)
for i := 0; i < parallelism; i++ { for i := 0; i < parallelism; i++ {
go func() { go func() {
u.worker(ctx, cluster, service, taskQueue) u.worker(ctx, taskQueue)
wg.Done() wg.Done()
}() }()
} }
for _, t := range dirtyTasks { var failedTaskWatch chan events.Event
// Wait for a worker to pick up the task or abort the update, whichever comes first.
select {
case <-u.stopChan:
break
case taskQueue <- t: if service.Spec.Update == nil || service.Spec.Update.FailureAction == api.UpdateConfig_PAUSE {
var cancelWatch func()
failedTaskWatch, cancelWatch = state.Watch(
u.store.WatchQueue(),
state.EventUpdateTask{
Task: &api.Task{ServiceID: service.ID, Status: api.TaskStatus{State: api.TaskStateRunning}},
Checks: []state.TaskCheckFunc{state.TaskCheckServiceID, state.TaskCheckStateGreaterThan},
},
)
defer cancelWatch()
}
stopped := false
taskLoop:
for _, t := range dirtyTasks {
retryLoop:
for {
// Wait for a worker to pick up the task or abort the update, whichever comes first.
select {
case <-u.stopChan:
stopped = true
break taskLoop
case ev := <-failedTaskWatch:
failedTask := ev.(state.EventUpdateTask).Task
// If this failed/completed task has a spec matching
// the one we're updating to, we should pause the
// update.
if !u.isTaskDirty(failedTask) {
stopped = true
message := fmt.Sprintf("update paused due to failure or early termination of task %s", failedTask.ID)
u.pauseUpdate(ctx, service.ID, message)
break taskLoop
}
case taskQueue <- t:
break retryLoop
}
} }
} }
close(taskQueue) close(taskQueue)
wg.Wait() wg.Wait()
if !stopped {
u.completeUpdate(ctx, service.ID)
}
} }
func (u *Updater) worker(ctx context.Context, cluster *api.Cluster, service *api.Service, queue <-chan *api.Task) { func (u *Updater) worker(ctx context.Context, queue <-chan *api.Task) {
for t := range queue { for t := range queue {
updated := newTask(cluster, service, t.Slot) updated := newTask(u.cluster, u.newService, t.Slot)
updated.DesiredState = api.TaskStateReady updated.DesiredState = api.TaskStateReady
if isGlobalService(service) { if isGlobalService(u.newService) {
updated.NodeID = t.NodeID updated.NodeID = t.NodeID
} }
if err := u.updateTask(ctx, service, t, updated); err != nil { if err := u.updateTask(ctx, t, updated); err != nil {
log.G(ctx).WithError(err).WithField("task.id", t.ID).Error("update failed") log.G(ctx).WithError(err).WithField("task.id", t.ID).Error("update failed")
} }
if service.Spec.Update != nil && (service.Spec.Update.Delay.Seconds != 0 || service.Spec.Update.Delay.Nanos != 0) { if u.newService.Spec.Update != nil && (u.newService.Spec.Update.Delay.Seconds != 0 || u.newService.Spec.Update.Delay.Nanos != 0) {
delay, err := ptypes.Duration(&service.Spec.Update.Delay) delay, err := ptypes.Duration(&u.newService.Spec.Update.Delay)
if err != nil { if err != nil {
log.G(ctx).WithError(err).Error("invalid update delay") log.G(ctx).WithError(err).Error("invalid update delay")
continue continue
@ -176,7 +236,7 @@ func (u *Updater) worker(ctx context.Context, cluster *api.Cluster, service *api
} }
} }
func (u *Updater) updateTask(ctx context.Context, service *api.Service, original, updated *api.Task) error { func (u *Updater) updateTask(ctx context.Context, original, updated *api.Task) error {
log.G(ctx).Debugf("replacing %s with %s", original.ID, updated.ID) log.G(ctx).Debugf("replacing %s with %s", original.ID, updated.ID)
// Kick off the watch before even creating the updated task. This is in order to avoid missing any event. // Kick off the watch before even creating the updated task. This is in order to avoid missing any event.
taskUpdates, cancel := state.Watch(u.watchQueue, state.EventUpdateTask{ taskUpdates, cancel := state.Watch(u.watchQueue, state.EventUpdateTask{
@ -231,3 +291,86 @@ func (u *Updater) updateTask(ctx context.Context, service *api.Service, original
} }
} }
} }
func (u *Updater) isTaskDirty(t *api.Task) bool {
return !reflect.DeepEqual(u.newService.Spec.Task, t.Spec) ||
(t.Endpoint != nil && !reflect.DeepEqual(u.newService.Spec.Endpoint, t.Endpoint.Spec))
}
func (u *Updater) isServiceDirty(service *api.Service) bool {
return !reflect.DeepEqual(u.newService.Spec.Task, service.Spec.Task) ||
!reflect.DeepEqual(u.newService.Spec.Endpoint, service.Spec.Endpoint)
}
func (u *Updater) startUpdate(ctx context.Context, serviceID string) {
err := u.store.Update(func(tx store.Tx) error {
service := store.GetService(tx, serviceID)
if service == nil {
return nil
}
if service.UpdateStatus != nil {
return nil
}
service.UpdateStatus = &api.UpdateStatus{
State: api.UpdateStatus_UPDATING,
Message: "update in progress",
StartedAt: ptypes.MustTimestampProto(time.Now()),
}
return store.UpdateService(tx, service)
})
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to mark update of service %s in progress", serviceID)
}
}
func (u *Updater) pauseUpdate(ctx context.Context, serviceID, message string) {
log.G(ctx).Debugf("pausing update of service %s", serviceID)
err := u.store.Update(func(tx store.Tx) error {
service := store.GetService(tx, serviceID)
if service == nil {
return nil
}
if service.UpdateStatus == nil {
// The service was updated since we started this update
return nil
}
service.UpdateStatus.State = api.UpdateStatus_PAUSED
service.UpdateStatus.Message = message
return store.UpdateService(tx, service)
})
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to pause update of service %s", serviceID)
}
}
func (u *Updater) completeUpdate(ctx context.Context, serviceID string) {
log.G(ctx).Debugf("update of service %s complete", serviceID)
err := u.store.Update(func(tx store.Tx) error {
service := store.GetService(tx, serviceID)
if service == nil {
return nil
}
if service.UpdateStatus == nil {
// The service was changed since we started this update
return nil
}
service.UpdateStatus.State = api.UpdateStatus_COMPLETED
service.UpdateStatus.Message = "update completed"
service.UpdateStatus.CompletedAt = ptypes.MustTimestampProto(time.Now())
return store.UpdateService(tx, service)
})
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to mark update of service %s complete", serviceID)
}
}

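A hedged sketch of opting into this pause-on-failure behaviour from the service spec side, using only the UpdateConfig fields that appear in this diff (Parallelism and FailureAction); everything else in the spec is elided.

package main

import "github.com/docker/swarmkit/api"

// pausedRolloutSpec is illustrative: update two tasks at a time and pause the
// rollout if an updated task fails to reach RUNNING. Resuming requires another
// service update, which resets UpdateStatus as shown in controlapi above.
func pausedRolloutSpec() *api.ServiceSpec {
	return &api.ServiceSpec{
		Update: &api.UpdateConfig{
			Parallelism:   2,
			FailureAction: api.UpdateConfig_PAUSE,
		},
	}
}

func main() { _ = pausedRolloutSpec() }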
View file

@ -2,8 +2,10 @@ package raft
import ( import (
"errors" "errors"
"fmt"
"math" "math"
"math/rand" "math/rand"
"net"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -537,13 +539,33 @@ func (n *Node) Join(ctx context.Context, req *api.JoinRequest) (*api.JoinRespons
} }
} }
remoteAddr := req.Addr
// If the joining node sent an address like 0.0.0.0:4242, automatically
// determine its actual address based on the GRPC connection. This
// avoids the need for a prospective member to know its own address.
requestHost, requestPort, err := net.SplitHostPort(remoteAddr)
if err != nil {
return nil, fmt.Errorf("invalid address %s in raft join request", remoteAddr)
}
requestIP := net.ParseIP(requestHost)
if requestIP != nil && requestIP.IsUnspecified() {
remoteHost, _, err := net.SplitHostPort(nodeInfo.RemoteAddr)
if err != nil {
return nil, err
}
remoteAddr = net.JoinHostPort(remoteHost, requestPort)
}
// We do not bother submitting a configuration change for the // We do not bother submitting a configuration change for the
// new member if we can't contact it back using its address // new member if we can't contact it back using its address
if err := n.checkHealth(ctx, req.Addr, 5*time.Second); err != nil { if err := n.checkHealth(ctx, remoteAddr, 5*time.Second); err != nil {
return nil, err return nil, err
} }
err = n.addMember(ctx, req.Addr, raftID, nodeInfo.NodeID) err = n.addMember(ctx, remoteAddr, raftID, nodeInfo.NodeID)
if err != nil { if err != nil {
log.WithError(err).Errorf("failed to add member") log.WithError(err).Errorf("failed to add member")
return nil, err return nil, err

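The same substitution as a self-contained sketch; the function name is made up for illustration.

package main

import (
	"fmt"
	"net"
)

// resolveJoinAddr mirrors the logic above: if the reported address is
// unspecified (e.g. 0.0.0.0:4242), keep its port but take the host from the
// address observed on the gRPC connection.
func resolveJoinAddr(reported, observed string) (string, error) {
	host, port, err := net.SplitHostPort(reported)
	if err != nil {
		return "", fmt.Errorf("invalid address %s in raft join request", reported)
	}
	if ip := net.ParseIP(host); ip != nil && ip.IsUnspecified() {
		remoteHost, _, err := net.SplitHostPort(observed)
		if err != nil {
			return "", err
		}
		return net.JoinHostPort(remoteHost, port), nil
	}
	return reported, nil
}

func main() {
	addr, _ := resolveJoinAddr("0.0.0.0:4242", "10.0.0.5:53412")
	fmt.Println(addr) // 10.0.0.5:4242
}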
View file

@ -40,6 +40,11 @@ func TaskCheckNodeID(t1, t2 *api.Task) bool {
return t1.NodeID == t2.NodeID return t1.NodeID == t2.NodeID
} }
// TaskCheckServiceID is a TaskCheckFunc for matching service IDs.
func TaskCheckServiceID(t1, t2 *api.Task) bool {
return t1.ServiceID == t2.ServiceID
}
// TaskCheckStateGreaterThan is a TaskCheckFunc for checking task state. // TaskCheckStateGreaterThan is a TaskCheckFunc for checking task state.
func TaskCheckStateGreaterThan(t1, t2 *api.Task) bool { func TaskCheckStateGreaterThan(t1, t2 *api.Task) bool {
return t2.Status.State > t1.Status.State return t2.Status.State > t1.Status.State

View file

@ -8,6 +8,7 @@ import (
"syscall" "syscall"
"github.com/vishvananda/netlink/nl" "github.com/vishvananda/netlink/nl"
"github.com/vishvananda/netns"
) )
// IFA_FLAGS is a u32 attribute. // IFA_FLAGS is a u32 attribute.
@ -192,7 +193,17 @@ type AddrUpdate struct {
// AddrSubscribe takes a chan down which notifications will be sent // AddrSubscribe takes a chan down which notifications will be sent
// when addresses change. Close the 'done' chan to stop subscription. // when addresses change. Close the 'done' chan to stop subscription.
func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error { func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error {
s, err := nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_IFADDR, syscall.RTNLGRP_IPV6_IFADDR) return addrSubscribe(netns.None(), netns.None(), ch, done)
}
// AddrSubscribeAt works like AddrSubscribe plus it allows the caller
// to choose the network namespace in which to subscribe (ns).
func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {
return addrSubscribe(ns, netns.None(), ch, done)
}
func addrSubscribe(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {
s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_IFADDR, syscall.RTNLGRP_IPV6_IFADDR)
if err != nil { if err != nil {
return err return err
} }

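A hedged usage sketch for the new *SubscribeAt variants (LinkSubscribeAt and RouteSubscribeAt below follow the same pattern); the namespace path is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netns"
)

func main() {
	// Open a handle to another network namespace (path is an example).
	ns, err := netns.GetFromPath("/var/run/netns/blue")
	if err != nil {
		log.Fatal(err)
	}
	defer ns.Close()

	updates := make(chan netlink.AddrUpdate)
	done := make(chan struct{})
	defer close(done)

	// Subscribe to address changes happening inside that namespace.
	if err := netlink.AddrSubscribeAt(ns, updates, done); err != nil {
		log.Fatal(err)
	}
	for u := range updates {
		fmt.Printf("address update: %+v\n", u)
	}
}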
View file

@ -143,7 +143,7 @@ func (h *Handle) FilterAdd(filter Filter) error {
if u32.RedirIndex != 0 { if u32.RedirIndex != 0 {
u32.Actions = append([]Action{NewMirredAction(u32.RedirIndex)}, u32.Actions...) u32.Actions = append([]Action{NewMirredAction(u32.RedirIndex)}, u32.Actions...)
} }
if err := encodeActions(actionsAttr, u32.Actions); err != nil { if err := EncodeActions(actionsAttr, u32.Actions); err != nil {
return err return err
} }
} else if fw, ok := filter.(*Fw); ok { } else if fw, ok := filter.(*Fw); ok {
@ -309,7 +309,7 @@ func toAttrs(tcgen *nl.TcGen, attrs *ActionAttrs) {
attrs.Bindcnt = int(tcgen.Bindcnt) attrs.Bindcnt = int(tcgen.Bindcnt)
} }
func encodeActions(attr *nl.RtAttr, actions []Action) error { func EncodeActions(attr *nl.RtAttr, actions []Action) error {
tabIndex := int(nl.TCA_ACT_TAB) tabIndex := int(nl.TCA_ACT_TAB)
for _, action := range actions { for _, action := range actions {

View file

@ -10,6 +10,7 @@ import (
"unsafe" "unsafe"
"github.com/vishvananda/netlink/nl" "github.com/vishvananda/netlink/nl"
"github.com/vishvananda/netns"
) )
const SizeofLinkStats = 0x5c const SizeofLinkStats = 0x5c
@ -425,7 +426,7 @@ func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_CSUM, boolAttr(vxlan.UDPCSum)) nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_CSUM, boolAttr(vxlan.UDPCSum))
} }
if vxlan.GBP { if vxlan.GBP {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GBP, boolAttr(vxlan.GBP)) nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GBP, []byte{})
} }
if vxlan.NoAge { if vxlan.NoAge {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0)) nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0))
@ -1011,7 +1012,17 @@ type LinkUpdate struct {
// LinkSubscribe takes a chan down which notifications will be sent // LinkSubscribe takes a chan down which notifications will be sent
// when links change. Close the 'done' chan to stop subscription. // when links change. Close the 'done' chan to stop subscription.
func LinkSubscribe(ch chan<- LinkUpdate, done <-chan struct{}) error { func LinkSubscribe(ch chan<- LinkUpdate, done <-chan struct{}) error {
s, err := nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_LINK) return linkSubscribe(netns.None(), netns.None(), ch, done)
}
// LinkSubscribeAt works like LinkSubscribe plus it allows the caller
// to choose the network namespace in which to subscribe (ns).
func LinkSubscribeAt(ns netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error {
return linkSubscribe(ns, netns.None(), ch, done)
}
func linkSubscribe(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error {
s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_LINK)
if err != nil { if err != nil {
return err return err
} }
@ -1152,7 +1163,7 @@ func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) {
case nl.IFLA_VXLAN_UDP_CSUM: case nl.IFLA_VXLAN_UDP_CSUM:
vxlan.UDPCSum = int8(datum.Value[0]) != 0 vxlan.UDPCSum = int8(datum.Value[0]) != 0
case nl.IFLA_VXLAN_GBP: case nl.IFLA_VXLAN_GBP:
vxlan.GBP = int8(datum.Value[0]) != 0 vxlan.GBP = true
case nl.IFLA_VXLAN_AGEING: case nl.IFLA_VXLAN_AGEING:
vxlan.Age = int(native.Uint32(datum.Value[0:4])) vxlan.Age = int(native.Uint32(datum.Value[0:4]))
vxlan.NoAge = vxlan.Age == 0 vxlan.NoAge = vxlan.Age == 0

View file

@ -331,24 +331,63 @@ func getNetlinkSocket(protocol int) (*NetlinkSocket, error) {
// moves back into it when done. If newNs is close, the socket will be opened // moves back into it when done. If newNs is close, the socket will be opened
// in the current network namespace. // in the current network namespace.
func GetNetlinkSocketAt(newNs, curNs netns.NsHandle, protocol int) (*NetlinkSocket, error) { func GetNetlinkSocketAt(newNs, curNs netns.NsHandle, protocol int) (*NetlinkSocket, error) {
var err error c, err := executeInNetns(newNs, curNs)
if err != nil {
return nil, err
}
defer c()
return getNetlinkSocket(protocol)
}
// executeInNetns sets execution of the code following this call to the
// network namespace newNs, then moves the thread back to curNs if open,
// otherwise to the current netns at the time the function was invoked.
// In case of success, the caller is expected to execute the returned function
// at the end of the code that needs to be executed in the network namespace.
// Example:
// func jobAt(...) error {
// d, err := executeInNetns(...)
// if err != nil { return err}
// defer d()
// < code which needs to be executed in specific netns>
// }
// TODO: this function probably belongs to the netns pkg.
func executeInNetns(newNs, curNs netns.NsHandle) (func(), error) {
var (
err error
moveBack func(netns.NsHandle) error
closeNs func() error
unlockThd func()
)
restore := func() {
// order matters
if moveBack != nil {
moveBack(curNs)
}
if closeNs != nil {
closeNs()
}
if unlockThd != nil {
unlockThd()
}
}
if newNs.IsOpen() { if newNs.IsOpen() {
runtime.LockOSThread() runtime.LockOSThread()
defer runtime.UnlockOSThread() unlockThd = runtime.UnlockOSThread
if !curNs.IsOpen() { if !curNs.IsOpen() {
if curNs, err = netns.Get(); err != nil { if curNs, err = netns.Get(); err != nil {
restore()
return nil, fmt.Errorf("could not get current namespace while creating netlink socket: %v", err) return nil, fmt.Errorf("could not get current namespace while creating netlink socket: %v", err)
} }
defer curNs.Close() closeNs = curNs.Close
} }
if err := netns.Set(newNs); err != nil { if err := netns.Set(newNs); err != nil {
restore()
return nil, fmt.Errorf("failed to set into network namespace %d while creating netlink socket: %v", newNs, err) return nil, fmt.Errorf("failed to set into network namespace %d while creating netlink socket: %v", newNs, err)
} }
defer netns.Set(curNs) moveBack = netns.Set
} }
return restore, nil
return getNetlinkSocket(protocol)
} }
// Create a netlink socket with a given protocol (e.g. NETLINK_ROUTE) // Create a netlink socket with a given protocol (e.g. NETLINK_ROUTE)
@ -377,6 +416,18 @@ func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) {
return s, nil return s, nil
} }
// SubscribeAt works like Subscribe plus lets the caller choose the network
// namespace in which the socket would be opened (newNs). Then control goes back
// to curNs if open, otherwise to the netns at the time this function was called.
func SubscribeAt(newNs, curNs netns.NsHandle, protocol int, groups ...uint) (*NetlinkSocket, error) {
c, err := executeInNetns(newNs, curNs)
if err != nil {
return nil, err
}
defer c()
return Subscribe(protocol, groups...)
}
func (s *NetlinkSocket) Close() { func (s *NetlinkSocket) Close() {
syscall.Close(s.fd) syscall.Close(s.fd)
s.fd = -1 s.fd = -1

View file

@ -10,6 +10,7 @@ const (
SizeofXfrmUsersaInfo = 0xe0 SizeofXfrmUsersaInfo = 0xe0
SizeofXfrmAlgo = 0x44 SizeofXfrmAlgo = 0x44
SizeofXfrmAlgoAuth = 0x48 SizeofXfrmAlgoAuth = 0x48
SizeofXfrmAlgoAEAD = 0x48
SizeofXfrmEncapTmpl = 0x18 SizeofXfrmEncapTmpl = 0x18
SizeofXfrmUsersaFlush = 0x8 SizeofXfrmUsersaFlush = 0x8
) )
@ -194,6 +195,35 @@ func (msg *XfrmAlgoAuth) Serialize() []byte {
// char alg_key[0]; // char alg_key[0];
// } // }
type XfrmAlgoAEAD struct {
AlgName [64]byte
AlgKeyLen uint32
AlgICVLen uint32
AlgKey []byte
}
func (msg *XfrmAlgoAEAD) Len() int {
return SizeofXfrmAlgoAEAD + int(msg.AlgKeyLen/8)
}
func DeserializeXfrmAlgoAEAD(b []byte) *XfrmAlgoAEAD {
ret := XfrmAlgoAEAD{}
copy(ret.AlgName[:], b[0:64])
ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64]))
ret.AlgICVLen = *(*uint32)(unsafe.Pointer(&b[68]))
ret.AlgKey = b[72:ret.Len()]
return &ret
}
func (msg *XfrmAlgoAEAD) Serialize() []byte {
b := make([]byte, msg.Len())
copy(b[0:64], msg.AlgName[:])
copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:])
copy(b[68:72], (*(*[4]byte)(unsafe.Pointer(&msg.AlgICVLen)))[:])
copy(b[72:msg.Len()], msg.AlgKey[:])
return b
}
// struct xfrm_encap_tmpl { // struct xfrm_encap_tmpl {
// __u16 encap_type; // __u16 encap_type;
// __be16 encap_sport; // __be16 encap_sport;

View file

@ -6,6 +6,7 @@ import (
"syscall" "syscall"
"github.com/vishvananda/netlink/nl" "github.com/vishvananda/netlink/nl"
"github.com/vishvananda/netns"
) )
// RtAttr is shared so it is in netlink_linux.go // RtAttr is shared so it is in netlink_linux.go
@ -421,7 +422,17 @@ func (h *Handle) RouteGet(destination net.IP) ([]Route, error) {
// RouteSubscribe takes a chan down which notifications will be sent // RouteSubscribe takes a chan down which notifications will be sent
// when routes are added or deleted. Close the 'done' chan to stop subscription. // when routes are added or deleted. Close the 'done' chan to stop subscription.
func RouteSubscribe(ch chan<- RouteUpdate, done <-chan struct{}) error { func RouteSubscribe(ch chan<- RouteUpdate, done <-chan struct{}) error {
s, err := nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_ROUTE, syscall.RTNLGRP_IPV6_ROUTE) return routeSubscribeAt(netns.None(), netns.None(), ch, done)
}
// RouteSubscribeAt works like RouteSubscribe plus it allows the caller
// to choose the network namespace in which to subscribe (ns).
func RouteSubscribeAt(ns netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error {
return routeSubscribeAt(ns, netns.None(), ch, done)
}
func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error {
s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_ROUTE, syscall.RTNLGRP_IPV6_ROUTE)
if err != nil { if err != nil {
return err return err
} }

View file

@ -10,10 +10,18 @@ type XfrmStateAlgo struct {
Name string Name string
Key []byte Key []byte
TruncateLen int // Auth only TruncateLen int // Auth only
ICVLen int // AEAD only
} }
func (a XfrmStateAlgo) String() string { func (a XfrmStateAlgo) String() string {
return fmt.Sprintf("{Name: %s, Key: 0x%x, TruncateLen: %d}", a.Name, a.Key, a.TruncateLen) base := fmt.Sprintf("{Name: %s, Key: 0x%x", a.Name, a.Key)
if a.TruncateLen != 0 {
base = fmt.Sprintf("%s, Truncate length: %d", base, a.TruncateLen)
}
if a.ICVLen != 0 {
base = fmt.Sprintf("%s, ICV length: %d", base, a.ICVLen)
}
return fmt.Sprintf("%s}", base)
} }
// EncapType is an enum representing the optional packet encapsulation. // EncapType is an enum representing the optional packet encapsulation.
@ -73,12 +81,13 @@ type XfrmState struct {
Mark *XfrmMark Mark *XfrmMark
Auth *XfrmStateAlgo Auth *XfrmStateAlgo
Crypt *XfrmStateAlgo Crypt *XfrmStateAlgo
Aead *XfrmStateAlgo
Encap *XfrmStateEncap Encap *XfrmStateEncap
} }
func (sa XfrmState) String() string { func (sa XfrmState) String() string {
return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, Auth: %v, Crypt: %v, Encap: %v", return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, Auth: %v, Crypt: %v, Aead: %v,Encap: %v",
sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.Auth, sa.Crypt, sa.Encap) sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.Auth, sa.Crypt, sa.Aead, sa.Encap)
} }
func (sa XfrmState) Print(stats bool) string { func (sa XfrmState) Print(stats bool) string {
if !stats { if !stats {

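A hedged sketch of the new AEAD support in use. The rfc4106 algorithm name, key layout, and ICV length are conventional Linux xfrm choices rather than anything this diff prescribes, and XfrmStateAdd is the library's existing state-add call.

package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	// 160-bit key for rfc4106(gcm(aes)): 128-bit AES key plus 32-bit salt.
	key := make([]byte, 20)

	state := &netlink.XfrmState{
		Src:   net.ParseIP("192.168.1.1"),
		Dst:   net.ParseIP("192.168.2.1"),
		Proto: netlink.XFRM_PROTO_ESP,
		Mode:  netlink.XFRM_MODE_TUNNEL,
		Spi:   0x1234,
		Aead: &netlink.XfrmStateAlgo{
			Name:   "rfc4106(gcm(aes))",
			Key:    key,
			ICVLen: 64, // bits of integrity check value
		},
	}
	if err := netlink.XfrmStateAdd(state); err != nil {
		log.Fatal(err)
	}
}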
View file

@ -35,6 +35,20 @@ func writeStateAlgoAuth(a *XfrmStateAlgo) []byte {
return algo.Serialize() return algo.Serialize()
} }
func writeStateAlgoAead(a *XfrmStateAlgo) []byte {
algo := nl.XfrmAlgoAEAD{
AlgKeyLen: uint32(len(a.Key) * 8),
AlgICVLen: uint32(a.ICVLen),
AlgKey: a.Key,
}
end := len(a.Name)
if end > 64 {
end = 64
}
copy(algo.AlgName[:end], a.Name)
return algo.Serialize()
}
func writeMark(m *XfrmMark) []byte { func writeMark(m *XfrmMark) []byte {
mark := &nl.XfrmMark{ mark := &nl.XfrmMark{
Value: m.Value, Value: m.Value,
@ -97,6 +111,10 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error {
out := nl.NewRtAttr(nl.XFRMA_ALG_CRYPT, writeStateAlgo(state.Crypt)) out := nl.NewRtAttr(nl.XFRMA_ALG_CRYPT, writeStateAlgo(state.Crypt))
req.AddData(out) req.AddData(out)
} }
if state.Aead != nil {
out := nl.NewRtAttr(nl.XFRMA_ALG_AEAD, writeStateAlgoAead(state.Aead))
req.AddData(out)
}
if state.Encap != nil { if state.Encap != nil {
encapData := make([]byte, nl.SizeofXfrmEncapTmpl) encapData := make([]byte, nl.SizeofXfrmEncapTmpl)
encap := nl.DeserializeXfrmEncapTmpl(encapData) encap := nl.DeserializeXfrmEncapTmpl(encapData)
@ -271,6 +289,12 @@ func parseXfrmState(m []byte, family int) (*XfrmState, error) {
state.Auth.Name = nl.BytesToString(algo.AlgName[:]) state.Auth.Name = nl.BytesToString(algo.AlgName[:])
state.Auth.Key = algo.AlgKey state.Auth.Key = algo.AlgKey
state.Auth.TruncateLen = int(algo.AlgTruncLen) state.Auth.TruncateLen = int(algo.AlgTruncLen)
case nl.XFRMA_ALG_AEAD:
state.Aead = new(XfrmStateAlgo)
algo := nl.DeserializeXfrmAlgoAEAD(attr.Value[:])
state.Aead.Name = nl.BytesToString(algo.AlgName[:])
state.Aead.Key = algo.AlgKey
state.Aead.ICVLen = int(algo.AlgICVLen)
case nl.XFRMA_ENCAP: case nl.XFRMA_ENCAP:
encap := nl.DeserializeXfrmEncapTmpl(attr.Value[:]) encap := nl.DeserializeXfrmEncapTmpl(attr.Value[:])
state.Encap = new(XfrmStateEncap) state.Encap = new(XfrmStateEncap)