Mirror of https://github.com/moby/moby.git (synced 2022-11-09 12:21:53 -05:00)
Update drain test
With the rolling update there is a possibility that the container count matches before the update has completely finished. The actual bug behind the flakiness was fixed with the swarmkit update.
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
This commit is contained in:
parent 25a89d73e5
commit b38408fd02
1 changed file with 7 additions and 10 deletions
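The commit message's point is that a check based only on container counts can observe the target count while the rolling update is still in flight. As an illustrative sketch only (not code from this commit or from the test suite), one way to rule out that race is to also poll the service's UpdateStatus until it reports completion. The helper name waitForUpdateComplete is invented here, and the calls assume a reasonably recent docker/docker client (ServiceInspectWithRaw taking a ServiceInspectOptions argument, UpdateStatus being a pointer field).

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/swarm"
    "github.com/docker/docker/client"
)

// waitForUpdateComplete polls a service until its rolling update reports
// "completed", instead of relying on the running-container count alone.
func waitForUpdateComplete(ctx context.Context, cli *client.Client, serviceID string) error {
    for {
        service, _, err := cli.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
        if err != nil {
            return err
        }
        // UpdateStatus is nil until an update has been started on the service.
        if service.UpdateStatus != nil && service.UpdateStatus.State == swarm.UpdateStateCompleted {
            return nil
        }
        select {
        case <-ctx.Done():
            return fmt.Errorf("service update did not complete: %w", ctx.Err())
        case <-time.After(250 * time.Millisecond):
        }
    }
}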
@@ -432,14 +432,13 @@ func (s *DockerSwarmSuite) TestApiSwarmNodeDrainPause(c *check.C) {
        n.Spec.Availability = swarm.NodeAvailabilityActive
    })

    // change environment variable, resulting balanced rescheduling
    d1.updateService(c, d1.getService(c, id), func(s *swarm.Service) {
        s.Spec.TaskTemplate.ContainerSpec.Env = []string{"FOO=BAR"}
        s.Spec.UpdateConfig = &swarm.UpdateConfig{
            Parallelism: 2,
            Delay:       250 * time.Millisecond,
        }
    })

    instances = 1
    d1.updateService(c, d1.getService(c, id), setInstances(instances))

    waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances)

    instances = 8
    d1.updateService(c, d1.getService(c, id), setInstances(instances))

    // drained node first so we don't get any old containers
    waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0)
@@ -453,8 +452,6 @@ func (s *DockerSwarmSuite) TestApiSwarmNodeDrainPause(c *check.C) {
        n.Spec.Availability = swarm.NodeAvailabilityPause
    })

    c.Skip("known flakiness with scaling up from this state")

    instances = 14
    d1.updateService(c, d1.getService(c, id), setInstances(instances))
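For background on the helpers referenced in the hunks above: judging by their names, checkActiveContainerCount counts running containers on a single daemon, and reducedCheck(sumAsIntegers, ...) appears to fold those per-daemon counts into one total. A hypothetical, stripped-down version of the per-daemon count is sketched below; the function name activeContainerCount is invented for illustration, and it assumes a docker/docker client version whose ContainerList takes types.ContainerListOptions.

package main

import (
    "context"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/client"
)

// activeContainerCount returns how many containers are currently running on
// the daemon behind cli, loosely what the test's per-daemon check measures.
func activeContainerCount(ctx context.Context, cli *client.Client) (int, error) {
    containers, err := cli.ContainerList(ctx, types.ContainerListOptions{
        Filters: filters.NewArgs(filters.Arg("status", "running")),
    })
    if err != nil {
        return 0, err
    }
    return len(containers), nil
}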