
Update drain test

With a rolling update there is a possibility
that the container count matches before the update
has completely finished.

The actual bug behind the flakiness was fixed by the
swarmkit update.

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Tonis Tiigi 9 years ago
commit b38408fd02
1 changed file with 7 additions and 10 deletions

+ 7 - 10
integration-cli/docker_api_swarm_test.go

@@ -432,14 +432,13 @@ func (s *DockerSwarmSuite) TestApiSwarmNodeDrainPause(c *check.C) {
 		n.Spec.Availability = swarm.NodeAvailabilityActive
 	})
 
-	// change environment variable, resulting balanced rescheduling
-	d1.updateService(c, d1.getService(c, id), func(s *swarm.Service) {
-		s.Spec.TaskTemplate.ContainerSpec.Env = []string{"FOO=BAR"}
-		s.Spec.UpdateConfig = &swarm.UpdateConfig{
-			Parallelism: 2,
-			Delay:       250 * time.Millisecond,
-		}
-	})
+	instances = 1
+	d1.updateService(c, d1.getService(c, id), setInstances(instances))
+
+	waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances)
+
+	instances = 8
+	d1.updateService(c, d1.getService(c, id), setInstances(instances))
 
 	// drained node first so we don't get any old containers
 	waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0)
@@ -453,8 +452,6 @@ func (s *DockerSwarmSuite) TestApiSwarmNodeDrainPause(c *check.C) {
 		n.Spec.Availability = swarm.NodeAvailabilityPause
 	})
 
-	c.Skip("known flakiness with scaling up from this state")
-
 	instances = 14
 	d1.updateService(c, d1.getService(c, id), setInstances(instances))
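Why a bare container-count check can be flaky here: a rolling update may pass through the target count transiently before it settles, so a test that only compares the count once can pass (or fail) mid-rollout. Below is a minimal, hypothetical Go sketch of a stability-window variant of such a check; it is not part of this commit, and the count callback, timeout, and grace values are stand-ins for the suite's checkActiveContainerCount helper and defaultReconciliationTimeout.

package main

import (
	"fmt"
	"time"
)

// waitForStableCount polls count until it equals want and then requires the
// match to hold for a grace window, since a rolling update can pass through
// the target count transiently before it has completely finished.
func waitForStableCount(count func() int, want int, timeout, grace time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if count() != want {
			time.Sleep(50 * time.Millisecond)
			continue
		}
		// Matched once; make sure it is not a mid-rollout coincidence.
		stableUntil := time.Now().Add(grace)
		held := true
		for time.Now().Before(stableUntil) {
			if count() != want {
				held = false // still rolling, go back to waiting
				break
			}
			time.Sleep(50 * time.Millisecond)
		}
		if held {
			return true
		}
	}
	return false
}

func main() {
	// Simulated rollout: the count passes through the target (3) once
	// before dipping and finally settling at 3.
	seq := []int{4, 3, 2, 3, 3, 3, 3, 3, 3, 3}
	i := 0
	count := func() int {
		v := seq[i]
		if i < len(seq)-1 {
			i++
		}
		return v
	}
	fmt.Println("stable:", waitForStableCount(count, 3, 5*time.Second, 300*time.Millisecond))
}

The commit itself takes a different route: it replaces the racy env-variable rolling update with plain setInstances scaling plus an intermediate wait, and per the commit message the underlying bug was fixed in swarmkit.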