Merge pull request #31030 from aboch/c1.13.x-2

[1.13.x] Vendoring swarmkit @c7df892
Alexander Morozov 2017-02-15 09:32:45 -08:00 committed by GitHub
commit b3b30b0dcd
6 changed files with 19 additions and 12 deletions


@@ -101,7 +101,7 @@ github.com/docker/containerd aa8187dbd3b7ad67d8e5e3a15115d3eef43a7ed1
 github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
 # cluster
-github.com/docker/swarmkit 1c7f003d75f091d5f7051ed982594420e4515f77
+github.com/docker/swarmkit c7df892262aa0bec0a3e52ea76219b7b364ded38
 github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
 github.com/gogo/protobuf v0.3
 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
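Each vendor.conf entry pins a Go import path to an exact upstream commit; the hunk above moves the swarmkit pin from 1c7f003 to c7df892, matching the PR title. For illustration only, a minimal sketch of splitting such a pin line into its two fields — the pin type and parsePin helper here are hypothetical, not part of Docker's vendoring tooling:

```go
package main

import (
	"fmt"
	"strings"
)

// pin is a hypothetical representation of one vendor.conf entry:
// an import path followed by the commit it is pinned to.
type pin struct {
	ImportPath string
	Revision   string
}

// parsePin splits a vendor.conf-style line into its import path and revision.
// Comment lines (starting with '#') and blank lines yield ok == false.
func parsePin(line string) (pin, bool) {
	line = strings.TrimSpace(line)
	if line == "" || strings.HasPrefix(line, "#") {
		return pin{}, false
	}
	fields := strings.Fields(line)
	if len(fields) < 2 {
		return pin{}, false
	}
	return pin{ImportPath: fields[0], Revision: fields[1]}, true
}

func main() {
	p, ok := parsePin("github.com/docker/swarmkit c7df892262aa0bec0a3e52ea76219b7b364ded38")
	if ok {
		fmt.Printf("%s pinned to %s\n", p.ImportPath, p.Revision)
	}
}
```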


@@ -289,8 +289,9 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
 if a.taskAllocateVote(networkVoter, t.ID) {
 // If the task is not attached to any network, network
 // allocators job is done. Immediately cast a vote so
-// that the task can be moved to ALLOCATED state as
+// that the task can be moved to the PENDING state as
 // soon as possible.
+updateTaskStatus(t, api.TaskStatePending, allocatedStatusMessage)
 allocatedTasks = append(allocatedTasks, t)
 }
 continue
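The hunk above is part of swarmkit renaming the post-allocation target state: a task with no network attachments needs nothing from the network allocator, so it is voted through and marked PENDING right away. A rough, self-contained sketch of that early-exit pattern, using simplified stand-in types (task, taskState, and markPendingIfNetworkFree are illustrative, not swarmkit's API):

```go
package main

import "fmt"

// taskState is a simplified stand-in for swarmkit's api.TaskState values.
type taskState int

const (
	stateNew taskState = iota
	statePending
	stateAllocated
)

// task is a minimal stand-in for api.Task: an ID, a state, and its networks.
type task struct {
	ID       string
	State    taskState
	Networks []string
}

// markPendingIfNetworkFree mimics the early vote: a task that is not attached
// to any network needs no network allocation, so it can move to PENDING at once.
func markPendingIfNetworkFree(t *task) bool {
	if len(t.Networks) > 0 {
		return false // real allocation is still required
	}
	t.State = statePending
	return true
}

func main() {
	t := &task{ID: "t1"}
	if markPendingIfNetworkFree(t) {
		fmt.Println("task", t.ID, "moved to PENDING without network allocation")
	}
}
```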
@@ -467,7 +468,7 @@ func taskDead(t *api.Task) bool {
 }
 // taskReadyForNetworkVote checks if the task is ready for a network
-// vote to move it to ALLOCATED state.
+// vote to move it to PENDING state.
 func taskReadyForNetworkVote(t *api.Task, s *api.Service, nc *networkContext) bool {
 // Task is ready for vote if the following is true:
 //


@@ -272,7 +272,8 @@ func (pa *portAllocator) portsAllocatedInHostPublishMode(s *api.Service) bool {
 if s.Spec.Endpoint != nil {
 for _, portConfig := range s.Spec.Endpoint.Ports {
-if portConfig.PublishMode == api.PublishModeHost {
+if portConfig.PublishMode == api.PublishModeHost &&
+portConfig.PublishedPort != 0 {
 if portStates.delState(portConfig) == nil {
 return false
 }
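The tightened condition above skips host-mode ports whose PublishedPort is 0, since such ports are assigned dynamically on each node and have no fixed allocator state to verify. A hedged sketch of the predicate with stand-in types (portConfig and publishModeHost are illustrative names, not swarmkit's types):

```go
package main

import "fmt"

// publishMode is a stand-in for swarmkit's api.PublishMode.
type publishMode int

const (
	publishModeIngress publishMode = iota
	publishModeHost
)

// portConfig is a minimal stand-in for api.PortConfig.
type portConfig struct {
	Mode          publishMode
	PublishedPort uint32
}

// needsHostStateCheck mirrors the tightened condition: only host-mode ports
// with an explicit (non-zero) published port have allocator state to verify;
// port 0 means "pick any free port on the node", so it is skipped.
func needsHostStateCheck(p portConfig) bool {
	return p.Mode == publishModeHost && p.PublishedPort != 0
}

func main() {
	fmt.Println(needsHostStateCheck(portConfig{Mode: publishModeHost, PublishedPort: 8080})) // true
	fmt.Println(needsHostStateCheck(portConfig{Mode: publishModeHost}))                      // false: dynamic port
}
```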


@@ -200,8 +200,6 @@ func (k *KeyManager) Run(ctx context.Context) error {
 } else {
 k.keyRing.lClock = cluster.EncryptionKeyLamportClock
 k.keyRing.keys = cluster.NetworkBootstrapKeys
-k.rotateKey(ctx)
 }
 ticker := time.NewTicker(k.config.RotationInterval)
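With the eager rotateKey call dropped, keys loaded from the cluster object are used as-is, and rotation only happens once the rotation interval elapses. A minimal sketch of such a ticker-driven loop, assuming a stand-in keyManager type rather than swarmkit's implementation:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// keyManager is a stand-in holding only what this sketch needs.
type keyManager struct {
	rotationInterval time.Duration
}

// run waits for the rotation interval before the first rotation instead of
// rotating immediately at startup, mirroring the behavior after the change.
func (k *keyManager) run(ctx context.Context, rotate func()) {
	ticker := time.NewTicker(k.rotationInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			rotate()
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()
	km := &keyManager{rotationInterval: 100 * time.Millisecond}
	km.run(ctx, func() { fmt.Println("rotating gossip keys") })
}
```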


@@ -504,7 +504,7 @@ func (g *Orchestrator) removeTasks(ctx context.Context, batch *store.Batch, task
 }
 func isTaskRunning(t *api.Task) bool {
-return t != nil && t.DesiredState <= api.TaskStateRunning && t.Status.State <= api.TaskStateRunning
+return t != nil && t.DesiredState <= api.TaskStateRunning
 }
 func isTaskCompleted(t *api.Task, restartPolicy api.RestartPolicy_RestartCondition) bool {
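After this change, the global orchestrator's isTaskRunning looks only at the desired state; a check against the observed Status.State is added separately in the task reaper hunks below. A small illustrative version using a simplified state type (not the swarmkit enum):

```go
package main

import "fmt"

// taskState is a simplified stand-in for swarmkit's api.TaskState ordering,
// where values at or before stateRunning come earlier in the task lifecycle.
type taskState int

const (
	stateNew taskState = iota
	statePending
	stateRunning
	stateShutdown
)

// task carries both the desired state (what the orchestrator wants) and the
// last observed state (what the agent reported).
type task struct {
	DesiredState  taskState
	ObservedState taskState
}

// isTaskRunning mirrors the simplified check above: only the desired state
// decides whether the orchestrator still considers the task running.
func isTaskRunning(t *task) bool {
	return t != nil && t.DesiredState <= stateRunning
}

func main() {
	// Desired to run but observed as shut down: still counted as running,
	// because only the desired state is consulted here.
	fmt.Println(isTaskRunning(&task{DesiredState: stateRunning, ObservedState: stateShutdown}))
}
```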


@@ -131,11 +131,13 @@ func (tr *TaskReaper) tick() {
 }
 defer func() {
-tr.dirty = make(map[instanceTuple]struct{})
+tr.orphaned = nil
 }()
-deleteTasks := tr.orphaned
+deleteTasks := make(map[string]struct{})
+for _, tID := range tr.orphaned {
+deleteTasks[tID] = struct{}{}
+}
 tr.store.View(func(tx store.ReadTx) {
 for dirty := range tr.dirty {
 service := store.GetService(tx, dirty.serviceID)
@@ -180,13 +182,15 @@ func (tr *TaskReaper) tick() {
 // instead of sorting the whole slice.
 sort.Sort(tasksByTimestamp(historicTasks))
+runningTasks := 0
 for _, t := range historicTasks {
-if t.DesiredState <= api.TaskStateRunning {
+if t.DesiredState <= api.TaskStateRunning || t.Status.State <= api.TaskStateRunning {
 // Don't delete running tasks
+runningTasks++
 continue
 }
-deleteTasks = append(deleteTasks, t.ID)
+deleteTasks[t.ID] = struct{}{}
 taskHistory++
 if int64(len(historicTasks)) <= taskHistory {
@@ -194,12 +198,15 @@ func (tr *TaskReaper) tick() {
 }
 }
+if runningTasks <= 1 {
+delete(tr.dirty, dirty)
+}
 }
 })
 if len(deleteTasks) > 0 {
 tr.store.Batch(func(batch *store.Batch) error {
-for _, taskID := range deleteTasks {
+for taskID := range deleteTasks {
 batch.Update(func(tx store.Tx) error {
 return store.DeleteTask(tx, taskID)
 })
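Taken together, the task reaper hunks turn deleteTasks into a set keyed by task ID, fold the orphaned task IDs into that set, and skip any task whose desired or observed state is still at or before RUNNING. A self-contained sketch of that set-based collection — historicTask and collectReapable are illustrative stand-ins, not swarmkit code:

```go
package main

import "fmt"

// taskState is a simplified ordering of lifecycle states, where values at or
// before stateRunning count as "still running" for reaping purposes.
type taskState int

const (
	statePending taskState = iota
	stateRunning
	stateShutdown
)

// historicTask is a stand-in for a stored task: an ID plus desired and observed state.
type historicTask struct {
	ID           string
	DesiredState taskState
	Status       taskState
}

// collectReapable builds the set of task IDs to delete. Orphaned IDs go in
// unconditionally; historic tasks are skipped while either their desired or
// observed state says they may still be running. Using a set also deduplicates
// an ID that appears both as orphaned and as reapable history.
func collectReapable(orphaned []string, history []historicTask) map[string]struct{} {
	deleteTasks := make(map[string]struct{})
	for _, id := range orphaned {
		deleteTasks[id] = struct{}{}
	}
	for _, t := range history {
		if t.DesiredState <= stateRunning || t.Status <= stateRunning {
			continue // don't delete tasks that may still be running
		}
		deleteTasks[t.ID] = struct{}{}
	}
	return deleteTasks
}

func main() {
	set := collectReapable(
		[]string{"orphan-1"},
		[]historicTask{
			{ID: "t1", DesiredState: stateShutdown, Status: stateShutdown}, // reapable
			{ID: "t2", DesiredState: stateRunning, Status: stateRunning},   // kept
		},
	)
	for id := range set {
		fmt.Println("will delete", id)
	}
}
```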