
Update drain test

With a rolling update there is a possibility
that the container count matches before the
update has completely finished.

The actual bug behind the flakiness was fixed
with the swarmkit update.

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Tonis Tiigi 2016-06-14 17:03:46 -07:00
parent 25a89d73e5
commit b38408fd02
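
The change below replaces the env-var-driven rebalance with explicit replica counts, and the wait condition now asserts on the sum of active containers across both daemons rather than one node's count, so a transient per-node match during a rolling update no longer satisfies the check early. As a minimal, self-contained sketch of how a reducer-based check like reducedCheck(sumAsIntegers, ...) could be composed: the types and signatures here (checkF, reducedCheck, sumAsIntegers) are assumptions modeled on the names in the diff, not the suite's actual helpers.

package main

import "fmt"

// checkF mirrors the shape of the test-suite check functions: each returns a
// current value (here, an active-container count). This is an assumed
// simplification; the real suite's checks also carry comments/errors.
type checkF func() int

// reducedCheck combines several per-node checks into one check whose value is
// the reduction (e.g. the sum) of the individual results.
func reducedCheck(reduce func(...int) int, funcs ...checkF) checkF {
	return func() int {
		vals := make([]int, 0, len(funcs))
		for _, f := range funcs {
			vals = append(vals, f())
		}
		return reduce(vals...)
	}
}

// sumAsIntegers adds up the per-node counts so the assertion sees the
// cluster-wide total rather than a single node's (possibly transient) count.
func sumAsIntegers(vals ...int) int {
	total := 0
	for _, v := range vals {
		total += v
	}
	return total
}

func main() {
	d1Count := func() int { return 3 } // stand-in for d1.checkActiveContainerCount
	d2Count := func() int { return 5 } // stand-in for d2.checkActiveContainerCount
	total := reducedCheck(sumAsIntegers, d1Count, d2Count)
	fmt.Println(total()) // 8: matches instances only once the whole cluster has converged
}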


@@ -432,14 +432,13 @@ func (s *DockerSwarmSuite) TestApiSwarmNodeDrainPause(c *check.C) {
 		n.Spec.Availability = swarm.NodeAvailabilityActive
 	})
 
-	// change environment variable, resulting balanced rescheduling
-	d1.updateService(c, d1.getService(c, id), func(s *swarm.Service) {
-		s.Spec.TaskTemplate.ContainerSpec.Env = []string{"FOO=BAR"}
-		s.Spec.UpdateConfig = &swarm.UpdateConfig{
-			Parallelism: 2,
-			Delay:       250 * time.Millisecond,
-		}
-	})
+	instances = 1
+	d1.updateService(c, d1.getService(c, id), setInstances(instances))
+
+	waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances)
+
+	instances = 8
+	d1.updateService(c, d1.getService(c, id), setInstances(instances))
 
 	// drained node first so we don't get any old containers
 	waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0)
@@ -453,8 +452,6 @@ func (s *DockerSwarmSuite) TestApiSwarmNodeDrainPause(c *check.C) {
 		n.Spec.Availability = swarm.NodeAvailabilityPause
 	})
 
-	c.Skip("known flakiness with scaling up from this state")
-
 	instances = 14
 	d1.updateService(c, d1.getService(c, id), setInstances(instances))
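
The updated test leans on the setInstances helper to change the service's replica count. A plausible shape for such a helper, sketched with simplified stand-ins for the swarm API types (the real helper presumably mutates the replicated-mode spec on swarm.Service, and its exact signature may differ):

package main

import "fmt"

// ReplicatedService and Service stand in for the swarm API types used by the
// test (swarm.ReplicatedService, swarm.Service), simplified so the sketch is
// self-contained.
type ReplicatedService struct{ Replicas *uint64 }
type ServiceSpec struct{ Replicated *ReplicatedService }
type Service struct{ Spec ServiceSpec }

// setInstances returns a mutator that updateService can apply to a service
// spec, pinning the replicated task count to the given value.
func setInstances(replicas int) func(*Service) {
	ureplicas := uint64(replicas)
	return func(s *Service) {
		s.Spec.Replicated = &ReplicatedService{Replicas: &ureplicas}
	}
}

func main() {
	var svc Service
	setInstances(8)(&svc)
	fmt.Println(*svc.Spec.Replicated.Replicas) // 8
}

Returning a mutator keeps updateService generic: the same call site can apply any spec change, whether a replica-count scale or, as elsewhere in this test, a node-availability toggle.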