Vendor swarmkit for 1.12.3
This updates swarmkit to include a few important bug fixes, including some fixes for regressions.

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
parent bb80604a0b
commit 109b53d882

5 changed files with 80 additions and 34 deletions
@@ -139,7 +139,7 @@ clone git github.com/docker/docker-credential-helpers v0.3.0
 clone git github.com/docker/containerd 0366d7e9693c930cf18c0f50cc16acec064e96c5
 
 # cluster
-clone git github.com/docker/swarmkit e239bc901fd6f5c85b36904e89f1b64c8c0635f2
+clone git github.com/docker/swarmkit 938530a15c8a0374b367f2b94ddfd8e8b9b61bad
 clone git github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
 clone git github.com/gogo/protobuf 43a2e0b1c32252bfbbdf81f7faa7a88fb3fa4028
 clone git github.com/cloudflare/cfssl b895b0549c0ff676f92cf09ba971ae02bb41367b
@@ -125,6 +125,8 @@ func (a *Allocator) Run(ctx context.Context) error {
 		aaCopy := aa
 		actor := func() error {
 			wg.Add(1)
+			defer wg.Done()
+
 			// init might return an allocator specific context
 			// which is a child of the passed in context to hold
 			// allocator specific state
@@ -133,10 +135,10 @@ func (a *Allocator) Run(ctx context.Context) error {
 				// if we are failing in the init of
 				// this allocator.
 				aa.cancel()
-				wg.Done()
 				return err
 			}
 
+			wg.Add(1)
 			go func() {
 				defer wg.Done()
 				a.run(ctx, aaCopy)
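The two hunks above rework the actor closure's WaitGroup accounting: as reconstructed here, the actor now holds one slot for its own lifetime (wg.Add(1) plus defer wg.Done()) and takes a separate slot only when it actually launches the long-running goroutine, instead of balancing the initial Add with a manual wg.Done() on the init-error path. Below is a minimal, self-contained sketch of that pattern; runActor, doInit, and doRun are invented names for illustration, not swarmkit APIs.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// runActor mirrors the pattern from the diff: the function holds one
// WaitGroup slot for its own duration, and adds a second slot only when
// it actually starts the background goroutine.
func runActor(wg *sync.WaitGroup, doInit func() error, doRun func()) error {
	wg.Add(1)
	defer wg.Done() // released whether or not init succeeds

	if err := doInit(); err != nil {
		// No manual wg.Done() needed here; the deferred call covers it.
		return err
	}

	wg.Add(1)
	go func() {
		defer wg.Done()
		doRun()
	}()
	return nil
}

func main() {
	var wg sync.WaitGroup

	// Failing init: the deferred Done keeps the counter balanced.
	if err := runActor(&wg, func() error { return errors.New("init failed") }, nil); err != nil {
		fmt.Println("actor not started:", err)
	}

	// Successful init: the goroutine holds its own slot until it finishes.
	_ = runActor(&wg, func() error { return nil }, func() { fmt.Println("running") })

	wg.Wait()
}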
@@ -67,7 +67,7 @@ type networkContext struct {
 	unallocatedNetworks map[string]*api.Network
 }
 
-func (a *Allocator) doNetworkInit(ctx context.Context) error {
+func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
 	na, err := networkallocator.New()
 	if err != nil {
 		return err
@@ -80,6 +80,13 @@ func (a *Allocator) doNetworkInit(ctx context.Context) error {
 		unallocatedNetworks: make(map[string]*api.Network),
 		ingressNetwork:      newIngressNetwork(),
 	}
+	a.netCtx = nc
+	defer func() {
+		// Clear a.netCtx if initialization was unsuccessful.
+		if err != nil {
+			a.netCtx = nil
+		}
+	}()
 
 	// Check if we have the ingress network. If not found create
 	// it before reading all network objects for allocation.
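This hunk (together with the signature change above and the removal of the late assignment further down) makes doNetworkInit publish a.netCtx up front with a named error return, and installs a defer that clears the field again if initialization fails partway through. As a rough illustration of that Go idiom (a named result inspected from a deferred closure), here is a standalone sketch; the service type and initialize method are invented for the example and stand in for the allocator and a.netCtx.

package main

import (
	"errors"
	"fmt"
)

type service struct {
	state *string // stands in for a field like a.netCtx
}

// initialize publishes s.state early so later steps can use it, and relies
// on the named return value to undo that publication if any step fails.
func (s *service) initialize(fail bool) (err error) {
	state := "ready"
	s.state = &state
	defer func() {
		// Clear s.state if initialization was unsuccessful.
		if err != nil {
			s.state = nil
		}
	}()

	if fail {
		return errors.New("init step failed")
	}
	return nil
}

func main() {
	var s service
	fmt.Println(s.initialize(true), s.state)  // error, state reset to nil
	fmt.Println(s.initialize(false), s.state) // nil error, state retained
}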
@@ -124,7 +131,7 @@ func (a *Allocator) doNetworkInit(ctx context.Context) error {
 	// that the we can get the preferred subnet for ingress
 	// network.
 	if !na.IsAllocated(nc.ingressNetwork) {
-		if err := a.allocateNetwork(ctx, nc, nc.ingressNetwork); err != nil {
+		if err := a.allocateNetwork(ctx, nc.ingressNetwork); err != nil {
 			log.G(ctx).Errorf("failed allocating ingress network during init: %v", err)
 		}
 
@@ -154,7 +161,7 @@ func (a *Allocator) doNetworkInit(ctx context.Context) error {
 			continue
 		}
 
-		if err := a.allocateNetwork(ctx, nc, n); err != nil {
+		if err := a.allocateNetwork(ctx, n); err != nil {
 			log.G(ctx).Errorf("failed allocating network %s during init: %v", n.ID, err)
 		}
 	}
@@ -178,7 +185,7 @@ func (a *Allocator) doNetworkInit(ctx context.Context) error {
 		}
 
 		node.Attachment.Network = nc.ingressNetwork.Copy()
-		if err := a.allocateNode(ctx, nc, node); err != nil {
+		if err := a.allocateNode(ctx, node); err != nil {
 			log.G(ctx).Errorf("Failed to allocate network resources for node %s during init: %v", node.ID, err)
 		}
 	}
@@ -197,7 +204,7 @@ func (a *Allocator) doNetworkInit(ctx context.Context) error {
 			continue
 		}
 
-		if err := a.allocateService(ctx, nc, s); err != nil {
+		if err := a.allocateService(ctx, s); err != nil {
 			log.G(ctx).Errorf("failed allocating service %s during init: %v", s.ID, err)
 		}
 	}
@@ -259,7 +266,7 @@ func (a *Allocator) doNetworkInit(ctx context.Context) error {
 		}
 
 		err := batch.Update(func(tx store.Tx) error {
-			_, err := a.allocateTask(ctx, nc, tx, t)
+			_, err := a.allocateTask(ctx, tx, t)
 			return err
 		})
 		if err != nil {
@@ -273,7 +280,6 @@ func (a *Allocator) doNetworkInit(ctx context.Context) error {
 		return err
 	}
 
-	a.netCtx = nc
 	return nil
 }
 
@@ -287,7 +293,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
 			break
 		}
 
-		if err := a.allocateNetwork(ctx, nc, n); err != nil {
+		if err := a.allocateNetwork(ctx, n); err != nil {
 			log.G(ctx).Errorf("Failed allocation for network %s: %v", n.ID, err)
 			break
 		}
@@ -308,7 +314,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
 			break
 		}
 
-		if err := a.allocateService(ctx, nc, s); err != nil {
+		if err := a.allocateService(ctx, s); err != nil {
 			log.G(ctx).Errorf("Failed allocation for service %s: %v", s.ID, err)
 			break
 		}
@@ -319,7 +325,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
 			break
 		}
 
-		if err := a.allocateService(ctx, nc, s); err != nil {
+		if err := a.allocateService(ctx, s); err != nil {
 			log.G(ctx).Errorf("Failed allocation during update of service %s: %v", s.ID, err)
 			break
 		}
@@ -334,18 +340,18 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
 		// it's still there.
 		delete(nc.unallocatedServices, s.ID)
 	case state.EventCreateNode, state.EventUpdateNode, state.EventDeleteNode:
-		a.doNodeAlloc(ctx, nc, ev)
+		a.doNodeAlloc(ctx, ev)
 	case state.EventCreateTask, state.EventUpdateTask, state.EventDeleteTask:
-		a.doTaskAlloc(ctx, nc, ev)
+		a.doTaskAlloc(ctx, ev)
 	case state.EventCommit:
-		a.procUnallocatedNetworks(ctx, nc)
-		a.procUnallocatedServices(ctx, nc)
-		a.procUnallocatedTasksNetwork(ctx, nc)
+		a.procUnallocatedNetworks(ctx)
+		a.procUnallocatedServices(ctx)
+		a.procUnallocatedTasksNetwork(ctx)
 		return
 	}
 }
 
-func (a *Allocator) doNodeAlloc(ctx context.Context, nc *networkContext, ev events.Event) {
+func (a *Allocator) doNodeAlloc(ctx context.Context, ev events.Event) {
 	var (
 		isDelete bool
 		node     *api.Node
@@ -361,6 +367,8 @@ func (a *Allocator) doNodeAlloc(ctx context.Context, nc *networkContext, ev even
 		node = v.Node.Copy()
 	}
 
+	nc := a.netCtx
+
 	if isDelete {
 		if nc.nwkAllocator.IsNodeAllocated(node) {
 			if err := nc.nwkAllocator.DeallocateNode(node); err != nil {
@@ -376,8 +384,8 @@ func (a *Allocator) doNodeAlloc(ctx context.Context, nc *networkContext, ev even
 		}
 
 		node.Attachment.Network = nc.ingressNetwork.Copy()
-		if err := a.allocateNode(ctx, nc, node); err != nil {
-			log.G(ctx).Errorf("Fauled to allocate network resources for node %s: %v", node.ID, err)
+		if err := a.allocateNode(ctx, node); err != nil {
+			log.G(ctx).Errorf("Failed to allocate network resources for node %s: %v", node.ID, err)
 		}
 	}
 }
@@ -451,7 +459,7 @@ func (a *Allocator) taskCreateNetworkAttachments(t *api.Task, s *api.Service) {
 	taskUpdateNetworks(t, networks)
 }
 
-func (a *Allocator) doTaskAlloc(ctx context.Context, nc *networkContext, ev events.Event) {
+func (a *Allocator) doTaskAlloc(ctx context.Context, ev events.Event) {
 	var (
 		isDelete bool
 		t        *api.Task
@@ -467,6 +475,8 @@ func (a *Allocator) doTaskAlloc(ctx context.Context, nc *networkContext, ev even
 		t = v.Task.Copy()
 	}
 
+	nc := a.netCtx
+
 	// If the task has stopped running or it's being deleted then
 	// we should free the network resources associated with the
 	// task right away.
@@ -517,7 +527,9 @@ func (a *Allocator) doTaskAlloc(ctx context.Context, nc *networkContext, ev even
 	nc.unallocatedTasks[t.ID] = t
 }
 
-func (a *Allocator) allocateNode(ctx context.Context, nc *networkContext, node *api.Node) error {
+func (a *Allocator) allocateNode(ctx context.Context, node *api.Node) error {
+	nc := a.netCtx
+
 	if err := nc.nwkAllocator.AllocateNode(node); err != nil {
 		return err
 	}
@@ -550,7 +562,9 @@ func (a *Allocator) allocateNode(ctx context.Context, nc *networkContext, node *
 	return nil
 }
 
-func (a *Allocator) allocateService(ctx context.Context, nc *networkContext, s *api.Service) error {
+func (a *Allocator) allocateService(ctx context.Context, s *api.Service) error {
+	nc := a.netCtx
+
 	if s.Spec.Endpoint != nil {
 		// service has user-defined endpoint
 		if s.Endpoint == nil {
@@ -635,7 +649,9 @@ func (a *Allocator) allocateService(ctx context.Context, nc *networkContext, s *
 	return nil
 }
 
-func (a *Allocator) allocateNetwork(ctx context.Context, nc *networkContext, n *api.Network) error {
+func (a *Allocator) allocateNetwork(ctx context.Context, n *api.Network) error {
+	nc := a.netCtx
+
 	if err := nc.nwkAllocator.Allocate(n); err != nil {
 		nc.unallocatedNetworks[n.ID] = n
 		return fmt.Errorf("failed during network allocation for network %s: %v", n.ID, err)
@@ -657,7 +673,7 @@ func (a *Allocator) allocateNetwork(ctx context.Context, nc *networkContext, n *
 	return nil
 }
 
-func (a *Allocator) allocateTask(ctx context.Context, nc *networkContext, tx store.Tx, t *api.Task) (*api.Task, error) {
+func (a *Allocator) allocateTask(ctx context.Context, tx store.Tx, t *api.Task) (*api.Task, error) {
 	taskUpdated := false
 
 	// Get the latest task state from the store before updating.
@@ -666,6 +682,8 @@ func (a *Allocator) allocateTask(ctx context.Context, nc *networkContext, tx sto
 		return nil, fmt.Errorf("could not find task %s while trying to update network allocation", t.ID)
 	}
 
+	nc := a.netCtx
+
 	// We might be here even if a task allocation has already
 	// happened but wasn't successfully committed to store. In such
 	// cases skip allocation and go straight ahead to updating the
@@ -725,10 +743,11 @@ func (a *Allocator) allocateTask(ctx context.Context, nc *networkContext, tx sto
 	return storeT, nil
 }
 
-func (a *Allocator) procUnallocatedNetworks(ctx context.Context, nc *networkContext) {
+func (a *Allocator) procUnallocatedNetworks(ctx context.Context) {
+	nc := a.netCtx
 	for _, n := range nc.unallocatedNetworks {
 		if !nc.nwkAllocator.IsAllocated(n) {
-			if err := a.allocateNetwork(ctx, nc, n); err != nil {
+			if err := a.allocateNetwork(ctx, n); err != nil {
 				log.G(ctx).Debugf("Failed allocation of unallocated network %s: %v", n.ID, err)
 				continue
 			}
@@ -738,10 +757,11 @@ func (a *Allocator) procUnallocatedNetworks(ctx context.Context, nc *networkCont
 	}
 }
 
-func (a *Allocator) procUnallocatedServices(ctx context.Context, nc *networkContext) {
+func (a *Allocator) procUnallocatedServices(ctx context.Context) {
+	nc := a.netCtx
 	for _, s := range nc.unallocatedServices {
 		if !nc.nwkAllocator.IsServiceAllocated(s) {
-			if err := a.allocateService(ctx, nc, s); err != nil {
+			if err := a.allocateService(ctx, s); err != nil {
 				log.G(ctx).Debugf("Failed allocation of unallocated service %s: %v", s.ID, err)
 				continue
 			}
@@ -751,7 +771,8 @@ func (a *Allocator) procUnallocatedServices(ctx context.Context, nc *networkCont
 	}
 }
 
-func (a *Allocator) procUnallocatedTasksNetwork(ctx context.Context, nc *networkContext) {
+func (a *Allocator) procUnallocatedTasksNetwork(ctx context.Context) {
+	nc := a.netCtx
 	tasks := make([]*api.Task, 0, len(nc.unallocatedTasks))
 
 	committed, err := a.store.Batch(func(batch *store.Batch) error {
@@ -759,7 +780,7 @@ func (a *Allocator) procUnallocatedTasksNetwork(ctx context.Context, nc *network
 		var allocatedT *api.Task
 		err := batch.Update(func(tx store.Tx) error {
 			var err error
-			allocatedT, err = a.allocateTask(ctx, nc, tx, t)
+			allocatedT, err = a.allocateTask(ctx, tx, t)
 			return err
 		})
 
@@ -149,13 +149,25 @@ func validateEndpointSpec(epSpec *api.EndpointSpec) error {
 		return grpc.Errorf(codes.InvalidArgument, "EndpointSpec: ports can't be used with dnsrr mode")
 	}
 
-	portSet := make(map[uint32]struct{})
+	type portSpec struct {
+		publishedPort uint32
+		protocol      api.PortConfig_Protocol
+	}
+
+	portSet := make(map[portSpec]struct{})
 	for _, port := range epSpec.Ports {
-		if _, ok := portSet[port.PublishedPort]; ok {
+		// If published port is not specified, it does not conflict
+		// with any others.
+		if port.PublishedPort == 0 {
+			continue
+		}
+
+		portSpec := portSpec{publishedPort: port.PublishedPort, protocol: port.Protocol}
+		if _, ok := portSet[portSpec]; ok {
 			return grpc.Errorf(codes.InvalidArgument, "EndpointSpec: duplicate published ports provided")
 		}
 
-		portSet[port.PublishedPort] = struct{}{}
+		portSet[portSpec] = struct{}{}
 	}
 
 	return nil
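This hunk relaxes the duplicate-published-port check in validateEndpointSpec: ports without an explicit PublishedPort no longer conflict with anything, and conflicts are keyed on the (published port, protocol) pair rather than the port number alone, so the same port can be published over TCP and UDP. A small self-contained sketch of that keying follows; PortConfig and checkDuplicatePorts below are stand-ins for illustration, not the swarmkit API types.

package main

import (
	"errors"
	"fmt"
)

// PortConfig is a stand-in for the fields of api.PortConfig used by the check.
type PortConfig struct {
	PublishedPort uint32
	Protocol      string // e.g. "tcp" or "udp"
}

// checkDuplicatePorts mirrors the new validation: unspecified ports are
// ignored, and duplicates are detected per (port, protocol) pair.
func checkDuplicatePorts(ports []PortConfig) error {
	type portSpec struct {
		publishedPort uint32
		protocol      string
	}

	seen := make(map[portSpec]struct{})
	for _, p := range ports {
		if p.PublishedPort == 0 {
			continue // not published; cannot conflict
		}
		key := portSpec{publishedPort: p.PublishedPort, protocol: p.Protocol}
		if _, ok := seen[key]; ok {
			return errors.New("duplicate published ports provided")
		}
		seen[key] = struct{}{}
	}
	return nil
}

func main() {
	fmt.Println(checkDuplicatePorts([]PortConfig{
		{PublishedPort: 80, Protocol: "tcp"},
		{PublishedPort: 80, Protocol: "udp"}, // same port, different protocol: allowed
		{PublishedPort: 0, Protocol: "tcp"},  // unspecified: ignored
	}))

	fmt.Println(checkDuplicatePorts([]PortConfig{
		{PublishedPort: 80, Protocol: "tcp"},
		{PublishedPort: 80, Protocol: "tcp"}, // exact duplicate: rejected
	}))
}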
@@ -2,6 +2,7 @@ package orchestrator
 
 import (
 	"container/list"
+	"errors"
 	"sync"
 	"time"
 
@@ -76,6 +77,9 @@ func (r *RestartSupervisor) waitRestart(ctx context.Context, oldDelay *delayedSt
 		if t == nil {
 			return nil
 		}
+		if t.DesiredState > api.TaskStateRunning {
+			return nil
+		}
 		service := store.GetService(tx, t.ServiceID)
 		if service == nil {
 			return nil
@@ -108,6 +112,13 @@ func (r *RestartSupervisor) Restart(ctx context.Context, tx store.Tx, cluster *a
 	}
 	r.mu.Unlock()
 
+	// Sanity check: was the task shut down already by a separate call to
+	// Restart? If so, we must avoid restarting it, because this will create
+	// an extra task. This should never happen unless there is a bug.
+	if t.DesiredState > api.TaskStateRunning {
+		return errors.New("Restart called on task that was already shut down")
+	}
+
 	t.DesiredState = api.TaskStateShutdown
 	err := store.UpdateTask(tx, &t)
 	if err != nil {
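The final hunks add a guard to RestartSupervisor.Restart (with a matching check in waitRestart): if a task's desired state is already past TaskStateRunning, it has been shut down by another code path, and restarting it again would create an extra replacement task. A minimal sketch of that guard; TaskState, Task, and restart below are a simplified model for illustration, not the swarmkit store or api types.

package main

import (
	"errors"
	"fmt"
)

// TaskState mimics the ordering of desired states: anything greater than
// TaskStateRunning means the task is on its way out.
type TaskState int

const (
	TaskStateRunning TaskState = iota
	TaskStateShutdown
)

type Task struct {
	ID           string
	DesiredState TaskState
}

// restart marks a task for shutdown so a replacement can be created, but
// refuses to act twice: a second call would otherwise create an extra task.
func restart(t *Task) error {
	if t.DesiredState > TaskStateRunning {
		return errors.New("restart called on task that was already shut down")
	}
	t.DesiredState = TaskStateShutdown
	return nil
}

func main() {
	t := &Task{ID: "task1", DesiredState: TaskStateRunning}
	fmt.Println(restart(t)) // <nil>: task marked for shutdown
	fmt.Println(restart(t)) // error: already shut down
}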