Merge pull request #35811 from abhi/vendor

Vendoring swarmkit a6519e28ff2a558f5d32b2dab9fcb0882879b398
Yong Tang
commit 26bc976ac9

+ 1 - 1
vendor.conf

@@ -114,7 +114,7 @@ github.com/dmcgowan/go-tar go1.10
 github.com/stevvooe/ttrpc 76e68349ad9ab4d03d764c713826d31216715e4f
 
 # cluster
-github.com/docker/swarmkit 4429c763170d9ca96929249353c3270c19e7d39e
+github.com/docker/swarmkit a6519e28ff2a558f5d32b2dab9fcb0882879b398
 github.com/gogo/protobuf v0.4
 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
 github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e

+ 5 - 0
vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go

@@ -404,6 +404,11 @@ func (na *cnmNetworkAllocator) IsServiceAllocated(s *api.Service, flags ...func(
 	vipLoop:
 		for _, vip := range s.Endpoint.VirtualIPs {
 			if na.IsVIPOnIngressNetwork(vip) && networkallocator.IsIngressNetworkNeeded(s) {
+				// Handle the case where the ingress network is needed
+				// but allocation has not been done for this service.
+				if _, ok := na.services[s.ID]; !ok {
+					return false
+				}
 				continue vipLoop
 			}
 			for _, net := range specNetworks {
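
To illustrate the fix outside the vendored tree, here is a minimal runnable sketch of the same pattern; allocatedServices and needsIngress are hypothetical stand-ins for na.services and the IsVIPOnIngressNetwork/IsIngressNetworkNeeded checks, not swarmkit's actual API.

package main

import "fmt"

// allocatedServices stands in for na.services: the set of service IDs for
// which the allocator has recorded state.
var allocatedServices = map[string]struct{}{}

// isServiceAllocated mirrors the new check: an ingress VIP only counts as
// allocated if the allocator has actually recorded state for the service.
func isServiceAllocated(serviceID string, needsIngress bool) bool {
	if needsIngress {
		if _, ok := allocatedServices[serviceID]; !ok {
			return false // ingress needed, but allocation never ran
		}
	}
	return true
}

func main() {
	fmt.Println(isServiceAllocated("svc1", true)) // false: not yet allocated
	allocatedServices["svc1"] = struct{}{}
	fmt.Println(isServiceAllocated("svc1", true)) // true
}

Previously a VIP on the ingress network was skipped unconditionally, so a service that still needed ingress allocation could be reported as already allocated.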

+ 31 - 12
vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/portallocator.go

@@ -324,9 +324,18 @@ func (pa *portAllocator) isPortsAllocatedOnInit(s *api.Service, onInit bool) boo
 	}
 
 	portStates := allocatedPorts{}
+	hostTargetPorts := map[uint32]struct{}{}
 	for _, portState := range s.Endpoint.Ports {
-		if portState.PublishMode == api.PublishModeIngress {
+		switch portState.PublishMode {
+		case api.PublishModeIngress:
 			portStates.addState(portState)
+		case api.PublishModeHost:
+			// Build a map of the host-mode target ports we have seen. If
+			// the spec contains a host-mode port that is not in the
+			// service's state, allocation is needed. If the target port is
+			// the same but something else has changed,
+			// HostPublishPortsNeedUpdate covers that case. See
+			// docker/swarmkit#2376.
+			hostTargetPorts[portState.TargetPort] = struct{}{}
 		}
 	}
 
@@ -344,18 +353,28 @@ func (pa *portAllocator) isPortsAllocatedOnInit(s *api.Service, onInit bool) boo
 	// Iterate portConfigs with PublishedPort == 0 (low priority)
 	for _, portConfig := range s.Spec.Endpoint.Ports {
 		// Ignore ports which are not PublishModeIngress
-		if portConfig.PublishMode != api.PublishModeIngress {
-			continue
-		}
-		if portConfig.PublishedPort == 0 && portStates.delState(portConfig) == nil {
-			return false
-		}
+		switch portConfig.PublishMode {
+		case api.PublishModeIngress:
+			if portConfig.PublishedPort == 0 && portStates.delState(portConfig) == nil {
+				return false
+			}
 
-		// If SwarmPort was not defined by user and the func
-		// is called during allocator initialization state then
-		// we are not allocated.
-		if portConfig.PublishedPort == 0 && onInit {
-			return false
+			// If SwarmPort was not defined by user and the func
+			// is called during allocator initialization state then
+			// we are not allocated.
+			if portConfig.PublishedPort == 0 && onInit {
+				return false
+			}
+		case api.PublishModeHost:
+			// If the spec contains a host-mode target port that is not in
+			// the service's allocated state, allocation is needed.
+			if _, ok := hostTargetPorts[portConfig.TargetPort]; !ok {
+				return false
+			}
+			// NOTE(dperny) there could be a further case where we check if
+			// there are host ports in the config that aren't in the spec, but
+			// that's only possible if there's a mismatch in the number of
+			// ports, which is handled by a length check earlier in the code
 		}
 	}
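
Taken together, the two hunks above implement a set-membership check over host-mode target ports. The following self-contained sketch shows the same logic with simplified stand-in types (portConfig, publishMode) in place of swarmkit's api.PortConfig; it is an illustration, not the vendored code.

package main

import "fmt"

type publishMode int

const (
	publishModeIngress publishMode = iota
	publishModeHost
)

type portConfig struct {
	Mode       publishMode
	TargetPort uint32
}

// hostPortsAllocated reports whether every host-mode target port in the spec
// already appears in the endpoint's allocated state.
func hostPortsAllocated(endpointPorts, specPorts []portConfig) bool {
	// Build the set of host-mode target ports recorded in the state.
	hostTargetPorts := map[uint32]struct{}{}
	for _, p := range endpointPorts {
		if p.Mode == publishModeHost {
			hostTargetPorts[p.TargetPort] = struct{}{}
		}
	}
	// Any host-mode port in the spec that is missing from the state means
	// allocation still has to run.
	for _, p := range specPorts {
		if p.Mode == publishModeHost {
			if _, ok := hostTargetPorts[p.TargetPort]; !ok {
				return false
			}
		}
	}
	return true
}

func main() {
	state := []portConfig{{publishModeHost, 8080}}
	spec := []portConfig{{publishModeHost, 8080}, {publishModeHost, 9090}}
	fmt.Println(hostPortsAllocated(state, spec)) // false: 9090 not yet allocated
}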
 

+ 15 - 1
vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/slot.go

@@ -12,6 +12,8 @@ type slotsByRunningState []orchestrator.Slot
 func (is slotsByRunningState) Len() int      { return len(is) }
 func (is slotsByRunningState) Swap(i, j int) { is[i], is[j] = is[j], is[i] }
 
+// Less returns true if the slot at index i should be preferred over the slot
+// at index j, all other things being equal in terms of node balance.
 func (is slotsByRunningState) Less(i, j int) bool {
 	iRunning := false
 	jRunning := false
@@ -29,7 +31,19 @@ func (is slotsByRunningState) Less(i, j int) bool {
 		}
 	}
 
-	return iRunning && !jRunning
+	if iRunning && !jRunning {
+		return true
+	}
+
+	if !iRunning && jRunning {
+		return false
+	}
+
+	// Use the Slot number as a tie-breaker, preferring to remove tasks in
+	// reverse order of Slot number. This helps avoid unnecessary master
+	// migration when scaling down a stateful service, because the master
+	// task of a stateful service is usually in a low-numbered Slot.
+	return is[i][0].Slot < is[j][0].Slot
 }
 
 type slotWithIndex struct {
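
The effect of the new tie-breaker can be seen in this self-contained sketch, which uses simplified stand-in types (task, slot) in place of swarmkit's api.Task and orchestrator.Slot:

package main

import (
	"fmt"
	"sort"
)

type task struct {
	Slot    uint64
	Running bool
}

type slot []task

type slotsByRunningState []slot

func (is slotsByRunningState) Len() int      { return len(is) }
func (is slotsByRunningState) Swap(i, j int) { is[i], is[j] = is[j], is[i] }

// Less prefers running slots first; among slots in the same running state it
// falls back to the Slot number, so scale-down trims the highest-numbered
// slots first.
func (is slotsByRunningState) Less(i, j int) bool {
	iRunning, jRunning := false, false
	for _, t := range is[i] {
		iRunning = iRunning || t.Running
	}
	for _, t := range is[j] {
		jRunning = jRunning || t.Running
	}
	if iRunning != jRunning {
		return iRunning
	}
	return is[i][0].Slot < is[j][0].Slot
}

func main() {
	slots := slotsByRunningState{
		{{Slot: 3, Running: true}},
		{{Slot: 1, Running: true}},
		{{Slot: 2, Running: false}},
	}
	sort.Sort(slots)
	for _, s := range slots {
		fmt.Println(s[0].Slot, s[0].Running) // prints 1 true, 3 true, 2 false
	}
}

Sorted this way, the tail of the list holds the non-running and high-numbered slots, which are the first to be removed when scaling down.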