From 66cb1222d6559e120d9d1a29932aa778aa517894 Mon Sep 17 00:00:00 2001
From: Kir Kolyshkin
Date: Wed, 24 Oct 2018 22:49:33 -0700
Subject: [PATCH 1/6] docker_cli_swarm_test.go: rm unused arg

Since commit 17173efbe00, checkSwarmLockedToUnlocked() no longer
requires its third argument, so remove it.

Signed-off-by: Kir Kolyshkin
---
 integration-cli/docker_cli_swarm_test.go | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/integration-cli/docker_cli_swarm_test.go b/integration-cli/docker_cli_swarm_test.go
index 9f99d0c849..1ced7879f9 100644
--- a/integration-cli/docker_cli_swarm_test.go
+++ b/integration-cli/docker_cli_swarm_test.go
@@ -1009,7 +1009,7 @@ func checkKeyIsEncrypted(d *daemon.Daemon) func(*check.C) (interface{}, check.Co
 	}
 }
 
-func checkSwarmLockedToUnlocked(c *check.C, d *daemon.Daemon, unlockKey string) {
+func checkSwarmLockedToUnlocked(c *check.C, d *daemon.Daemon) {
 	// Wait for the PEM file to become unencrypted
 	waitAndAssert(c, defaultReconciliationTimeout, checkKeyIsEncrypted(d), checker.Equals, false)
 
@@ -1100,7 +1100,7 @@ func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) {
 	outs, err = d.Cmd("swarm", "update", "--autolock=false")
 	c.Assert(err, checker.IsNil, check.Commentf("%s", outs))
 
-	checkSwarmLockedToUnlocked(c, d, unlockKey)
+	checkSwarmLockedToUnlocked(c, d)
 
 	outs, err = d.Cmd("node", "ls")
 	c.Assert(err, checker.IsNil, check.Commentf("%s", outs))
@@ -1195,7 +1195,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) {
 
 	// the ones that got the update are now set to unlocked
 	for _, d := range []*daemon.Daemon{d1, d3} {
-		checkSwarmLockedToUnlocked(c, d, unlockKey)
+		checkSwarmLockedToUnlocked(c, d)
 	}
 
 	// d2 still locked
@@ -1208,7 +1208,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) {
 	c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive)
 
 	// once it's caught up, d2 is set to not be locked
-	checkSwarmLockedToUnlocked(c, d2, unlockKey)
+	checkSwarmLockedToUnlocked(c, d2)
 
 	// managers who join now are never set to locked in the first place
 	d4 := s.AddDaemon(c, true, true)
@@ -1488,7 +1488,7 @@ func (s *DockerSwarmSuite) TestSwarmAlternateLockUnlock(c *check.C) {
 		outs, err = d.Cmd("swarm", "update", "--autolock=false")
 		c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs))
 
-		checkSwarmLockedToUnlocked(c, d, unlockKey)
+		checkSwarmLockedToUnlocked(c, d)
 	}
 }

From 73baee2dcf546b2561bdd9a500b0af08cb62b1be Mon Sep 17 00:00:00 2001
From: Kir Kolyshkin
Date: Fri, 5 Oct 2018 11:10:28 -0700
Subject: [PATCH 2/6] integration-cli: fix netns test cleanup

1. Using the MNT_FORCE flag does not make sense for nsfs; MNT_DETACH,
   though, might help.

2. When -check.vv is added to TESTFLAGS, there are a lot of messages
   like this one:

   > unmount of /tmp/dxr/d847fd103a4ba/netns failed: invalid argument

   and some like

   > unmount of /tmp/dxr/dd245af642d94/netns failed: no such file or directory

   The first one means the directory is not a mount point; the second
   one means it is already gone. Ignore both of these errors.
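For illustration, a minimal standalone sketch of the resulting cleanup
logic (not part of this patch; the real code is cleanupNetworkNamespace
below, and the exec-root path here is made up):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"

        "golang.org/x/sys/unix"
    )

    func cleanupNetns(dir string) {
        filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
            // MNT_DETACH detaches the mount lazily, so a busy nsfs mount
            // does not fail the cleanup. EINVAL means path is not a mount
            // point; ENOENT means it is already gone. Both are expected
            // here, so they are not logged.
            if err := unix.Unmount(path, unix.MNT_DETACH); err != nil && err != unix.EINVAL && err != unix.ENOENT {
                fmt.Fprintf(os.Stderr, "unmount of %s failed: %v\n", path, err)
            }
            os.Remove(path)
            return nil
        })
    }

    func main() {
        cleanupNetns("/tmp/example-exec-root/netns")
    }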
Signed-off-by: Kir Kolyshkin
---
 internal/test/daemon/daemon_unix.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/test/daemon/daemon_unix.go b/internal/test/daemon/daemon_unix.go
index 9dd9e36f0c..eb604fec75 100644
--- a/internal/test/daemon/daemon_unix.go
+++ b/internal/test/daemon/daemon_unix.go
@@ -21,7 +21,7 @@ func cleanupNetworkNamespace(t testingT, execRoot string) {
 	// new exec root.
 	netnsPath := filepath.Join(execRoot, "netns")
 	filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error {
-		if err := unix.Unmount(path, unix.MNT_FORCE); err != nil {
+		if err := unix.Unmount(path, unix.MNT_DETACH); err != nil && err != unix.EINVAL && err != unix.ENOENT {
 			t.Logf("unmount of %s failed: %v", path, err)
 		}
 		os.Remove(path)

From 6016520162fdcb19f50d08c4f0b54b06a7a6eac0 Mon Sep 17 00:00:00 2001
From: Kir Kolyshkin
Date: Thu, 25 Oct 2018 01:08:45 -0700
Subject: [PATCH 3/6] internal/test/daemon: don't leak timers

A timer leaks on every daemon start and stop. Probably nothing major,
but given the number of daemon starts/stops during tests, it is better
to be accurate about it.
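A standalone sketch of the difference (not part of this patch): a ticker
created by time.Tick can never be stopped, so its backing timer is only
reclaimed at process exit, while time.NewTicker plus a deferred Stop
releases it as soon as the polling loop returns:

    package main

    import (
        "fmt"
        "time"
    )

    // leaky polls using time.Tick; the ticker it creates cannot be
    // stopped, so its backing timer is leaked when the function returns.
    func leaky(done chan struct{}) {
        tick := time.Tick(500 * time.Millisecond)
        for {
            select {
            case <-tick:
            case <-done:
                return
            }
        }
    }

    // fixed does the same polling, but releases the timer on return.
    func fixed(done chan struct{}) {
        ticker := time.NewTicker(500 * time.Millisecond)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
            case <-done:
                return
            }
        }
    }

    func main() {
        done := make(chan struct{})
        go fixed(done)
        time.Sleep(time.Second)
        close(done)
        fmt.Println("done")
    }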
Signed-off-by: Kir Kolyshkin
---
 internal/test/daemon/daemon.go | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/internal/test/daemon/daemon.go b/internal/test/daemon/daemon.go
index d9126f9f0c..7471d1093b 100644
--- a/internal/test/daemon/daemon.go
+++ b/internal/test/daemon/daemon.go
@@ -285,7 +285,10 @@ func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
 
 	d.Wait = wait
 
-	tick := time.Tick(500 * time.Millisecond)
+	ticker := time.NewTicker(500 * time.Millisecond)
+	defer ticker.Stop()
+	tick := ticker.C
+
 	// make sure daemon is ready to receive requests
 	startTime := time.Now().Unix()
 	for {
@@ -423,7 +426,9 @@ func (d *Daemon) StopWithError() error {
 	}()
 
 	i := 1
-	tick := time.Tick(time.Second)
+	ticker := time.NewTicker(time.Second)
+	defer ticker.Stop()
+	tick := ticker.C
 
 	if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
 		if strings.Contains(err.Error(), "os: process already finished") {

From 24cbb9897193894f4716583d1861091ab2fa1ae2 Mon Sep 17 00:00:00 2001
From: Kir Kolyshkin
Date: Mon, 29 Oct 2018 13:46:21 -0700
Subject: [PATCH 4/6] docker_cli_swarm_test: factor out common code

This code is repeated six times in different tests, with minor
variations. Factor it out, for clarity.

While at it, simplify the code: instead of parsing the output of
"docker swarm init|update --autolock" (1) and then checking that the
key is also present in the output of "docker swarm unlock-key" (2),
get the key from (2) and check that it is present in (1).
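With the helper in place, every caller shrinks to the following pattern
(shown here for illustration; getUnlockKey itself is added at the
bottom of this diff):

    outs, err := d.Cmd("swarm", "update", "--autolock")
    c.Assert(err, checker.IsNil, check.Commentf("%s", outs))
    unlockKey := getUnlockKey(d, c, outs)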
Signed-off-by: Kir Kolyshkin
---
 integration-cli/docker_cli_swarm_test.go | 108 ++++------------------
 1 file changed, 19 insertions(+), 89 deletions(-)

diff --git a/integration-cli/docker_cli_swarm_test.go b/integration-cli/docker_cli_swarm_test.go
index 1ced7879f9..02d922e100 100644
--- a/integration-cli/docker_cli_swarm_test.go
+++ b/integration-cli/docker_cli_swarm_test.go
@@ -1055,22 +1055,7 @@ func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) {
 
 	outs, err := d.Cmd("swarm", "init", "--autolock")
 	c.Assert(err, checker.IsNil, check.Commentf("%s", outs))
-
-	c.Assert(outs, checker.Contains, "docker swarm unlock")
-
-	var unlockKey string
-	for _, line := range strings.Split(outs, "\n") {
-		if strings.Contains(line, "SWMKEY") {
-			unlockKey = strings.TrimSpace(line)
-			break
-		}
-	}
-
-	c.Assert(unlockKey, checker.Not(checker.Equals), "")
-
-	outs, err = d.Cmd("swarm", "unlock-key", "-q")
-	c.Assert(err, checker.IsNil, check.Commentf("%s", outs))
-	c.Assert(outs, checker.Equals, unlockKey+"\n")
+	unlockKey := getUnlockKey(d, c, outs)
 
 	c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive)
 
@@ -1155,22 +1140,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) {
 	// enable autolock
 	outs, err := d1.Cmd("swarm", "update", "--autolock")
 	c.Assert(err, checker.IsNil, check.Commentf("%s", outs))
-
-	c.Assert(outs, checker.Contains, "docker swarm unlock")
-
-	var unlockKey string
-	for _, line := range strings.Split(outs, "\n") {
-		if strings.Contains(line, "SWMKEY") {
-			unlockKey = strings.TrimSpace(line)
-			break
-		}
-	}
-
-	c.Assert(unlockKey, checker.Not(checker.Equals), "")
-
-	outs, err = d1.Cmd("swarm", "unlock-key", "-q")
-	c.Assert(err, checker.IsNil)
-	c.Assert(outs, checker.Equals, unlockKey+"\n")
+	unlockKey := getUnlockKey(d1, c, outs)
 
 	// The ones that got the cluster update should be set to locked
 	for _, d := range []*daemon.Daemon{d1, d3} {
@@ -1222,22 +1192,7 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) {
 	// enable autolock
 	outs, err := d1.Cmd("swarm", "update", "--autolock")
 	c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs))
-
-	c.Assert(outs, checker.Contains, "docker swarm unlock")
-
-	var unlockKey string
-	for _, line := range strings.Split(outs, "\n") {
-		if strings.Contains(line, "SWMKEY") {
-			unlockKey = strings.TrimSpace(line)
-			break
-		}
-	}
-
-	c.Assert(unlockKey, checker.Not(checker.Equals), "")
-
-	outs, err = d1.Cmd("swarm", "unlock-key", "-q")
-	c.Assert(err, checker.IsNil)
-	c.Assert(outs, checker.Equals, unlockKey+"\n")
+	unlockKey := getUnlockKey(d1, c, outs)
 
 	// joined workers start off unlocked
 	d2 := s.AddDaemon(c, true, false)
@@ -1295,22 +1250,7 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) {
 
 	outs, err := d.Cmd("swarm", "update", "--autolock")
 	c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs))
-
-	c.Assert(outs, checker.Contains, "docker swarm unlock")
-
-	var unlockKey string
-	for _, line := range strings.Split(outs, "\n") {
-		if strings.Contains(line, "SWMKEY") {
-			unlockKey = strings.TrimSpace(line)
-			break
-		}
-	}
-
-	c.Assert(unlockKey, checker.Not(checker.Equals), "")
-
-	outs, err = d.Cmd("swarm", "unlock-key", "-q")
-	c.Assert(err, checker.IsNil)
-	c.Assert(outs, checker.Equals, unlockKey+"\n")
+	unlockKey := getUnlockKey(d, c, outs)
 
 	// Rotate multiple times
 	for i := 0; i != 3; i++ {
@@ -1380,22 +1320,7 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) {
 
 	outs, err := d1.Cmd("swarm", "update", "--autolock")
 	c.Assert(err, checker.IsNil, check.Commentf("%s", outs))
-
-	c.Assert(outs, checker.Contains, "docker swarm unlock")
-
-	var unlockKey string
-	for _, line := range strings.Split(outs, "\n") {
-		if strings.Contains(line, "SWMKEY") {
-			unlockKey = strings.TrimSpace(line)
-			break
-		}
-	}
-
-	c.Assert(unlockKey, checker.Not(checker.Equals), "")
-
-	outs, err = d1.Cmd("swarm", "unlock-key", "-q")
-	c.Assert(err, checker.IsNil, check.Commentf("%s", outs))
-	c.Assert(outs, checker.Equals, unlockKey+"\n")
+	unlockKey := getUnlockKey(d1, c, outs)
 
 	// Rotate multiple times
 	for i := 0; i != 3; i++ {
@@ -1462,21 +1387,13 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) {
 func (s *DockerSwarmSuite) TestSwarmAlternateLockUnlock(c *check.C) {
 	d := s.AddDaemon(c, true, true)
 
-	var unlockKey string
 	for i := 0; i < 2; i++ {
 		// set to lock
 		outs, err := d.Cmd("swarm", "update", "--autolock")
 		c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs))
 		c.Assert(outs, checker.Contains, "docker swarm unlock")
+		unlockKey := getUnlockKey(d, c, outs)
 
-		for _, line := range strings.Split(outs, "\n") {
-			if strings.Contains(line, "SWMKEY") {
-				unlockKey = strings.TrimSpace(line)
-				break
-			}
-		}
-
-		c.Assert(unlockKey, checker.Not(checker.Equals), "")
 		checkSwarmUnlockedToLocked(c, d)
 
 		cmd := d.Command("swarm", "unlock")
@@ -2065,3 +1982,16 @@ func (s *DockerSwarmSuite) TestSwarmClusterEventsConfig(c *check.C) {
 	// filtered by config
 	waitForEvent(c, d, t1, "-f type=config", "config remove "+id, defaultRetryCount)
 }
+
+func getUnlockKey(d *daemon.Daemon, c *check.C, autolockOutput string) string {
+	unlockKey, err := d.Cmd("swarm", "unlock-key", "-q")
+	c.Assert(err, checker.IsNil, check.Commentf("%s", unlockKey))
+	unlockKey = strings.TrimSuffix(unlockKey, "\n")
+
+	// Check that "docker swarm init --autolock" or "docker swarm update --autolock"
+	// contains all the expected strings, including the unlock key
+	c.Assert(autolockOutput, checker.Contains, "docker swarm unlock")
+	c.Assert(autolockOutput, checker.Contains, unlockKey)
+
+	return unlockKey
+}

From 06afc2d1e6f8c5052af71e8815266d30e29ed664 Mon Sep 17 00:00:00 2001
From: Kir Kolyshkin
Date: Wed, 12 Sep 2018 19:30:09 -0700
Subject: [PATCH 5/6] TestAPISwarmLeaderElection: add some debug

......
Signed-off-by: Kir Kolyshkin
---
 integration-cli/docker_api_swarm_test.go | 4 ++--
 integration-cli/docker_utils_test.go     | 6 ++++++
 internal/test/daemon/node.go             | 2 +-
 3 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/integration-cli/docker_api_swarm_test.go b/integration-cli/docker_api_swarm_test.go
index 10b9938486..1d35ed0d55 100644
--- a/integration-cli/docker_api_swarm_test.go
+++ b/integration-cli/docker_api_swarm_test.go
@@ -332,6 +332,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
 	}
 
 	// wait for an election to occur
+	c.Logf("Waiting for election to occur...")
 	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True)
 
 	// assert that we have a new leader
@@ -343,9 +344,8 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
 	// add the d1, the initial leader, back
 	d1.Start(c)
 
-	// TODO(stevvooe): may need to wait for rejoin here
-
 	// wait for possible election
+	c.Logf("Waiting for possible election...")
 	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True)
 
 	// pick out the leader and the followers again
diff --git a/integration-cli/docker_utils_test.go b/integration-cli/docker_utils_test.go
index 1c05bf5d04..04faef8f83 100644
--- a/integration-cli/docker_utils_test.go
+++ b/integration-cli/docker_utils_test.go
@@ -419,6 +419,12 @@ func getErrorMessage(c *check.C, body []byte) string {
 }
 
 func waitAndAssert(c *check.C, timeout time.Duration, f checkF, checker check.Checker, args ...interface{}) {
+	t1 := time.Now()
+	defer func() {
+		t2 := time.Now()
+		c.Logf("waited for %v (out of %v)", t2.Sub(t1), timeout)
+	}()
+
 	after := time.After(timeout)
 	for {
 		v, comment := f(c)
diff --git a/internal/test/daemon/node.go b/internal/test/daemon/node.go
index d9263a7f29..33dd365429 100644
--- a/internal/test/daemon/node.go
+++ b/internal/test/daemon/node.go
@@ -23,7 +23,7 @@ func (d *Daemon) GetNode(t assert.TestingT, id string) *swarm.Node {
 	defer cli.Close()
 
 	node, _, err := cli.NodeInspectWithRaw(context.Background(), id)
-	assert.NilError(t, err)
+	assert.NilError(t, err, "[%s] (*Daemon).GetNode: NodeInspectWithRaw(%q) failed", d.id, id)
 	assert.Check(t, node.ID == id)
 	return &node
 }

From 2ed512c7faea938b0b07e69187b8a132e2ecb66a Mon Sep 17 00:00:00 2001
From: Kir Kolyshkin
Date: Thu, 25 Oct 2018 11:47:56 -0700
Subject: [PATCH 6/6] integration-cli/Test*Swarm*: use same args on restart

When starting docker daemons for swarm testing, we disable iptables
and use lo for communication, in order to avoid network conflicts.
The problem is, these options are lost on restart, which can lead to
all sorts of network conflicts and thus connectivity issues between
swarm nodes. Fix this.

This does not fix the swarm test failures entirely, but it seems they
appear less often after this change.
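In other words (an illustrative sketch, not the full diff):

    // Before: the restart does not repeat the swarm-specific arguments,
    // so the daemon comes back with iptables enabled and the default
    // advertise address.
    d2.Restart(c)

    // After: StartNode/RestartNode always (re)start the daemon with
    // "--iptables=false --swarm-default-advertise-addr=lo".
    d2.RestartNode(c)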
Signed-off-by: Kir Kolyshkin
---
 integration-cli/check_test.go                 |  2 +-
 integration-cli/docker_api_swarm_node_test.go |  2 +-
 integration-cli/docker_api_swarm_test.go      | 22 ++++++------
 integration-cli/docker_cli_swarm_test.go      | 32 +++++++++----------
 internal/test/daemon/swarm.go                 | 32 +++++++++++++------
 5 files changed, 49 insertions(+), 41 deletions(-)

diff --git a/integration-cli/check_test.go b/integration-cli/check_test.go
index 2282967ee5..10fe4e7646 100644
--- a/integration-cli/check_test.go
+++ b/integration-cli/check_test.go
@@ -333,7 +333,7 @@ func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *daemo
 			d.StartAndSwarmInit(c)
 		}
 	} else {
-		d.StartWithBusybox(c, "--iptables=false", "--swarm-default-advertise-addr=lo")
+		d.StartNode(c)
 	}
 
 	s.portIndex++
diff --git a/integration-cli/docker_api_swarm_node_test.go b/integration-cli/docker_api_swarm_node_test.go
index 191391620d..30c2285463 100644
--- a/integration-cli/docker_api_swarm_node_test.go
+++ b/integration-cli/docker_api_swarm_node_test.go
@@ -62,7 +62,7 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) {
 	c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes))
 
 	// Restart the node that was removed
-	d2.Restart(c)
+	d2.RestartNode(c)
 
 	// Give some time for the node to rejoin
 	time.Sleep(1 * time.Second)
diff --git a/integration-cli/docker_api_swarm_test.go b/integration-cli/docker_api_swarm_test.go
index 1d35ed0d55..822accdbb5 100644
--- a/integration-cli/docker_api_swarm_test.go
+++ b/integration-cli/docker_api_swarm_test.go
@@ -66,8 +66,8 @@ func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
 	d1.Stop(c)
 	d2.Stop(c)
 
-	d1.Start(c)
-	d2.Start(c)
+	d1.StartNode(c)
+	d2.StartNode(c)
 
 	info = d1.SwarmInfo(c)
 	c.Assert(info.ControlAvailable, checker.True)
@@ -342,7 +342,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
 	stableleader := leader
 
 	// add the d1, the initial leader, back
-	d1.Start(c)
+	d1.StartNode(c)
 
 	// wait for possible election
 	c.Logf("Waiting for possible election...")
@@ -387,7 +387,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
 		return err.Error(), nil
 	}, checker.Contains, "Make sure more than half of the managers are online.")
 
-	d2.Start(c)
+	d2.StartNode(c)
 
 	// make sure there is a leader
 	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
@@ -463,8 +463,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) {
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
 
-	d.Stop(c)
-	d.Start(c)
+	d.RestartNode(c)
 
 	info := d.SwarmInfo(c)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 }
@@ -477,25 +476,22 @@ func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) {
 	id := d1.CreateService(c, simpleTestService, setInstances(instances))
 
 	d1.GetService(c, id)
-	d1.Stop(c)
-	d1.Start(c)
+	d1.RestartNode(c)
 	d1.GetService(c, id)
 
 	d2 := s.AddDaemon(c, true, true)
 	d2.GetService(c, id)
-	d2.Stop(c)
-	d2.Start(c)
+	d2.RestartNode(c)
 	d2.GetService(c, id)
 
 	d3 := s.AddDaemon(c, true, true)
 	d3.GetService(c, id)
-	d3.Stop(c)
-	d3.Start(c)
+	d3.RestartNode(c)
 	d3.GetService(c, id)
 
 	d3.Kill()
 	time.Sleep(1 * time.Second) // time to handle signal
-	d3.Start(c)
+	d3.StartNode(c)
 	d3.GetService(c, id)
 }
 
diff --git a/integration-cli/docker_cli_swarm_test.go b/integration-cli/docker_cli_swarm_test.go
index 02d922e100..9702e40e9f 100644
--- a/integration-cli/docker_cli_swarm_test.go
+++ b/integration-cli/docker_cli_swarm_test.go
@@ -163,7 +163,7 @@ func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) {
 	c.Assert(err, checker.IsNil)
 	c.Assert(string(content), checker.Contains, "--live-restore daemon configuration is incompatible with swarm mode")
 	// restart for teardown
-	d.Start(c)
+	d.StartNode(c)
 }
 
 func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *check.C) {
@@ -330,7 +330,7 @@ func (s *DockerSwarmSuite) TestSwarmContainerAutoStart(c *check.C) {
 	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
-	d.Restart(c)
+	d.RestartNode(c)
 
 	out, err = d.Cmd("ps", "-q")
 	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
@@ -1013,7 +1013,7 @@ func checkSwarmLockedToUnlocked(c *check.C, d *daemon.Daemon) {
 	// Wait for the PEM file to become unencrypted
 	waitAndAssert(c, defaultReconciliationTimeout, checkKeyIsEncrypted(d), checker.Equals, false)
 
-	d.Restart(c)
+	d.RestartNode(c)
 	c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive)
 }
 
@@ -1021,7 +1021,7 @@ func checkSwarmUnlockedToLocked(c *check.C, d *daemon.Daemon) {
 	// Wait for the PEM file to become encrypted
 	waitAndAssert(c, defaultReconciliationTimeout, checkKeyIsEncrypted(d), checker.Equals, true)
 
-	d.Restart(c)
+	d.RestartNode(c)
 	c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked)
 }
 
@@ -1060,7 +1060,7 @@ func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) {
 	c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive)
 
 	// It starts off locked
-	d.Restart(c)
+	d.RestartNode(c)
 	c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked)
 
 	cmd := d.Command("swarm", "unlock")
@@ -1099,7 +1099,7 @@ func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *check.C) {
 	c.Assert(err, checker.IsNil, check.Commentf("%s", outs))
 
 	// It starts off locked
-	d.Restart(c, "--swarm-default-advertise-addr=lo")
+	d.RestartNode(c)
 
 	info := d.SwarmInfo(c)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked)
@@ -1131,7 +1131,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) {
 	d3 := s.AddDaemon(c, true, true)
 
 	// they start off unlocked
-	d2.Restart(c)
+	d2.RestartNode(c)
 	c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive)
 
 	// stop this one so it does not get autolock info
@@ -1153,7 +1153,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) {
 	}
 
 	// d2 never got the cluster update, so it is still set to unlocked
-	d2.Start(c)
+	d2.StartNode(c)
 	c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive)
 
 	// d2 is now set to lock
@@ -1182,7 +1182,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) {
 
 	// managers who join now are never set to locked in the first place
 	d4 := s.AddDaemon(c, true, true)
-	d4.Restart(c)
+	d4.RestartNode(c)
 	c.Assert(getNodeStatus(c, d4), checker.Equals, swarm.LocalNodeStateActive)
 }
 
@@ -1196,7 +1196,7 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) {
 
 	// joined workers start off unlocked
 	d2 := s.AddDaemon(c, true, false)
-	d2.Restart(c)
+	d2.RestartNode(c)
 	c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive)
 
 	// promote worker
@@ -1241,7 +1241,7 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) {
 	}, checker.Equals, "swarm-worker")
 
 	// by now, it should *never* be locked on restart
-	d3.Restart(c)
+	d3.RestartNode(c)
 	c.Assert(getNodeStatus(c, d3), checker.Equals, swarm.LocalNodeStateActive)
 }
 
@@ -1261,7 +1261,7 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) {
 		c.Assert(newUnlockKey, checker.Not(checker.Equals), "")
 		c.Assert(newUnlockKey, checker.Not(checker.Equals), unlockKey)
 
-		d.Restart(c)
+		d.RestartNode(c)
 		c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked)
 
 		outs, _ = d.Cmd("node", "ls")
@@ -1282,7 +1282,7 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) {
 
 			time.Sleep(3 * time.Second)
 
-			d.Restart(c)
+			d.RestartNode(c)
 
 			cmd = d.Command("swarm", "unlock")
 			cmd.Stdin = bytes.NewBufferString(unlockKey)
@@ -1331,8 +1331,8 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) {
 		c.Assert(newUnlockKey, checker.Not(checker.Equals), "")
 		c.Assert(newUnlockKey, checker.Not(checker.Equals), unlockKey)
 
-		d2.Restart(c)
-		d3.Restart(c)
+		d2.RestartNode(c)
+		d3.RestartNode(c)
 
 		for _, d := range []*daemon.Daemon{d2, d3} {
 			c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked)
@@ -1355,7 +1355,7 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) {
 
 				time.Sleep(3 * time.Second)
 
-				d.Restart(c)
+				d.RestartNode(c)
 
 				cmd = d.Command("swarm", "unlock")
 				cmd.Stdin = bytes.NewBufferString(unlockKey)
diff --git a/internal/test/daemon/swarm.go b/internal/test/daemon/swarm.go
index ae6a62c9eb..92ef856640 100644
--- a/internal/test/daemon/swarm.go
+++ b/internal/test/daemon/swarm.go
@@ -16,26 +16,38 @@ const (
 	defaultSwarmListenAddr = "0.0.0.0"
 )
 
-// StartAndSwarmInit starts the daemon (with busybox) and init the swarm
-func (d *Daemon) StartAndSwarmInit(t testingT) {
+var (
+	startArgs = []string{"--iptables=false", "--swarm-default-advertise-addr=lo"}
+)
+
+// StartNode starts daemon to be used as a swarm node
+func (d *Daemon) StartNode(t testingT) {
 	if ht, ok := t.(test.HelperT); ok {
 		ht.Helper()
 	}
 	// avoid networking conflicts
-	args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"}
-	d.StartWithBusybox(t, args...)
+	d.StartWithBusybox(t, startArgs...)
+}
 
+// RestartNode restarts a daemon to be used as a swarm node
+func (d *Daemon) RestartNode(t testingT) {
+	if ht, ok := t.(test.HelperT); ok {
+		ht.Helper()
+	}
+	// avoid networking conflicts
+	d.Stop(t)
+	d.StartWithBusybox(t, startArgs...)
+}
+
+// StartAndSwarmInit starts the daemon (with busybox) and init the swarm
+func (d *Daemon) StartAndSwarmInit(t testingT) {
+	d.StartNode(t)
 	d.SwarmInit(t, swarm.InitRequest{})
 }
 
 // StartAndSwarmJoin starts the daemon (with busybox) and join the specified swarm as worker or manager
 func (d *Daemon) StartAndSwarmJoin(t testingT, leader *Daemon, manager bool) {
-	if ht, ok := t.(test.HelperT); ok {
-		ht.Helper()
-	}
-	// avoid networking conflicts
-	args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"}
-	d.StartWithBusybox(t, args...)
+	d.StartNode(t)
 
 	tokens := leader.JoinTokens(t)
 	token := tokens.Worker