Merge pull request #23789 from tonistiigi/swarm-integration-tests

Update swarm integration tests and shutdown synchronization
Commit 9162011680, authored by Alexander Morozov on 2016-06-29 14:09:57 -07:00 and committed by GitHub.
4 changed files with 207 additions and 167 deletions
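Summary of the diff below: the per-node runtime state (conn, client, ready, reconnectDelay) moves out of Cluster into a new node struct that Cluster embeds, and shutdown is now signalled through a done channel that is closed when the node's error-watching goroutine exits. cancelReconnect is replaced by a stopNode helper that cancels any pending reconnect delay and then waits on that channel, and the integration-test helpers (DockerSwarmSuite.AddDaemon, SwarmDaemon.Init/Join) take swarm.InitRequest and swarm.JoinRequest values directly. A TestApiSwarmForceNewCluster test is added.

The following is a minimal, self-contained sketch of that shutdown-synchronization pattern, not code from this patch; owner, worker and the timings are illustrative stand-ins for Cluster, node and the agent run loop.

package main

import (
    "fmt"
    "sync"
    "time"
)

// worker stands in for the node wrapper: done is closed exactly once when the
// background loop has fully exited.
type worker struct {
    done chan struct{}
}

type owner struct {
    sync.Mutex
    w *worker
}

// start launches the background loop and records the worker under the lock.
func (o *owner) start() {
    w := &worker{done: make(chan struct{})}
    go func() {
        defer close(w.done)               // like close(node.done) in the patch
        time.Sleep(50 * time.Millisecond) // stand-in for the agent run loop
    }()
    o.Lock()
    o.w = w
    o.Unlock()
}

// stop mirrors the shape of Cluster.stopNode: it is called with the lock held,
// releases it while waiting (the exiting goroutine may need it), then re-locks.
func (o *owner) stop() {
    w := o.w
    if w == nil {
        return
    }
    o.w = nil
    o.Unlock()
    defer o.Lock()
    <-w.done // block until the background loop has fully shut down
}

func main() {
    o := &owner{}
    o.start()
    o.Lock()
    o.stop()
    o.Unlock()
    fmt.Println("worker fully stopped")
}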


@@ -86,18 +86,23 @@ type Config struct {
 // manager.
 type Cluster struct {
     sync.RWMutex
+    *node
     root        string
     config      Config
     configEvent chan struct{} // todo: make this array and goroutine safe
-    node        *swarmagent.Node
+    listenAddr  string
+    stop        bool
+    err         error
+    cancelDelay func()
+}
+
+type node struct {
+    *swarmagent.Node
+    done  chan struct{}
+    ready bool
     conn   *grpc.ClientConn
     client swarmapi.ControlClient
-    ready          bool
-    listenAddr     string
-    err            error
     reconnectDelay time.Duration
-    stop           bool
-    cancelDelay    func()
 }

 // New creates a new Cluster instance using provided config.
@@ -110,7 +115,6 @@ func New(config Config) (*Cluster, error) {
         root:        root,
         config:      config,
         configEvent: make(chan struct{}, 10),
-        reconnectDelay: initialReconnectDelay,
     }

     st, err := c.loadState()
@@ -121,7 +125,7 @@ func New(config Config) (*Cluster, error) {
         return nil, err
     }

-    n, ctx, err := c.startNewNode(false, st.ListenAddr, "", "", "", false)
+    n, err := c.startNewNode(false, st.ListenAddr, "", "", "", false)
     if err != nil {
         return nil, err
     }
@@ -130,12 +134,10 @@ func New(config Config) (*Cluster, error) {
     case <-time.After(swarmConnectTimeout):
         logrus.Errorf("swarm component could not be started before timeout was reached")
     case <-n.Ready():
-    case <-ctx.Done():
+    case <-n.done:
+        return nil, fmt.Errorf("swarm component could not be started: %v", c.err)
     }
-    if ctx.Err() != nil {
-        return nil, fmt.Errorf("swarm component could not be started")
-    }
-    go c.reconnectOnFailure(ctx)
+    go c.reconnectOnFailure(n)
     return c, nil
 }
@@ -166,20 +168,20 @@ func (c *Cluster) saveState() error {
     return ioutils.AtomicWriteFile(filepath.Join(c.root, stateFile), dt, 0600)
 }

-func (c *Cluster) reconnectOnFailure(ctx context.Context) {
+func (c *Cluster) reconnectOnFailure(n *node) {
     for {
-        <-ctx.Done()
+        <-n.done
         c.Lock()
         if c.stop || c.node != nil {
             c.Unlock()
             return
         }
-        c.reconnectDelay *= 2
-        if c.reconnectDelay > maxReconnectDelay {
-            c.reconnectDelay = maxReconnectDelay
+        n.reconnectDelay *= 2
+        if n.reconnectDelay > maxReconnectDelay {
+            n.reconnectDelay = maxReconnectDelay
         }
-        logrus.Warnf("Restarting swarm in %.2f seconds", c.reconnectDelay.Seconds())
-        delayCtx, cancel := context.WithTimeout(context.Background(), c.reconnectDelay)
+        logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds())
+        delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay)
         c.cancelDelay = cancel
         c.Unlock()
         <-delayCtx.Done()
@@ -192,22 +194,23 @@ func (c *Cluster) reconnectOnFailure(ctx context.Context) {
             return
         }
         var err error
-        _, ctx, err = c.startNewNode(false, c.listenAddr, c.getRemoteAddress(), "", "", false)
+        n, err = c.startNewNode(false, c.listenAddr, c.getRemoteAddress(), "", "", false)
         if err != nil {
             c.err = err
-            ctx = delayCtx
+            close(n.done)
         }
         c.Unlock()
     }
 }

-func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, secret, cahash string, ismanager bool) (*swarmagent.Node, context.Context, error) {
+func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, secret, cahash string, ismanager bool) (*node, error) {
     if err := c.config.Backend.IsSwarmCompatible(); err != nil {
-        return nil, nil, err
+        return nil, err
     }
     c.node = nil
     c.cancelDelay = nil
-    node, err := swarmagent.NewNode(&swarmagent.NodeConfig{
+    c.stop = false
+    n, err := swarmagent.NewNode(&swarmagent.NodeConfig{
         Hostname:         c.config.Name,
         ForceNewCluster:  forceNewCluster,
         ListenControlAPI: filepath.Join(c.root, controlSocket),
@@ -222,85 +225,76 @@ func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, secret, cahash string, ismanager bool) (*swarmagent.Node, context.Context, error) {
         IsManager: ismanager,
     })
     if err != nil {
-        return nil, nil, err
+        return nil, err
     }
-    ctx, cancel := context.WithCancel(context.Background())
-    if err := node.Start(ctx); err != nil {
-        return nil, nil, err
+    ctx := context.Background()
+    if err := n.Start(ctx); err != nil {
+        return nil, err
+    }
+    node := &node{
+        Node:           n,
+        done:           make(chan struct{}),
+        reconnectDelay: initialReconnectDelay,
     }
     c.node = node
     c.listenAddr = listenAddr
     c.saveState()
     c.config.Backend.SetClusterProvider(c)
     go func() {
-        err := node.Err(ctx)
+        err := n.Err(ctx)
         if err != nil {
             logrus.Errorf("cluster exited with error: %v", err)
         }
         c.Lock()
-        c.conn = nil
-        c.client = nil
         c.node = nil
-        c.ready = false
         c.err = err
         c.Unlock()
-        cancel()
+        close(node.done)
     }()

     go func() {
         select {
-        case <-node.Ready():
+        case <-n.Ready():
             c.Lock()
-            c.reconnectDelay = initialReconnectDelay
-            c.Unlock()
-        case <-ctx.Done():
-        }
-        if ctx.Err() == nil {
-            c.Lock()
-            c.ready = true
+            node.ready = true
             c.err = nil
             c.Unlock()
+        case <-ctx.Done():
         }
         c.configEvent <- struct{}{}
     }()

     go func() {
-        for conn := range node.ListenControlSocket(ctx) {
+        for conn := range n.ListenControlSocket(ctx) {
             c.Lock()
-            if c.conn != conn {
-                c.client = swarmapi.NewControlClient(conn)
-            }
-            if c.conn != nil {
-                c.client = nil
+            if node.conn != conn {
+                if conn == nil {
+                    node.client = nil
+                } else {
+                    node.client = swarmapi.NewControlClient(conn)
+                }
             }
-            c.conn = conn
+            node.conn = conn
             c.Unlock()
             c.configEvent <- struct{}{}
         }
     }()

-    return node, ctx, nil
+    return node, nil
 }

 // Init initializes new cluster from user provided request.
 func (c *Cluster) Init(req types.InitRequest) (string, error) {
     c.Lock()
     if node := c.node; node != nil {
-        c.Unlock()
         if !req.ForceNewCluster {
+            c.Unlock()
             return "", errSwarmExists(node)
         }
-        ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
-        defer cancel()
-        c.cancelReconnect()
-        if err := c.node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
+        if err := c.stopNode(); err != nil {
+            c.Unlock()
             return "", err
         }
-        c.Lock()
-        c.node = nil
-        c.conn = nil
-        c.ready = false
     }

     if err := validateAndSanitizeInitRequest(&req); err != nil {
@@ -309,7 +303,7 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
     }

     // todo: check current state existing
-    n, ctx, err := c.startNewNode(req.ForceNewCluster, req.ListenAddr, "", "", "", false)
+    n, err := c.startNewNode(req.ForceNewCluster, req.ListenAddr, "", "", "", false)
     if err != nil {
         c.Unlock()
         return "", err
@@ -321,12 +315,11 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
         if err := initClusterSpec(n, req.Spec); err != nil {
             return "", err
         }
-        go c.reconnectOnFailure(ctx)
+        go c.reconnectOnFailure(n)
         return n.NodeID(), nil
-    case <-ctx.Done():
+    case <-n.done:
         c.RLock()
         defer c.RUnlock()
-        if c.err != nil {
         if !req.ForceNewCluster { // if failure on first attempt don't keep state
             if err := c.clearState(); err != nil {
                 return "", err
@@ -334,8 +327,6 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
             }
             return "", c.err
         }
-        return "", ctx.Err()
-    }
 }

 // Join makes current Cluster part of an existing swarm cluster.
@@ -350,7 +341,7 @@ func (c *Cluster) Join(req types.JoinRequest) error {
         return err
     }
     // todo: check current state existing
-    n, ctx, err := c.startNewNode(false, req.ListenAddr, req.RemoteAddrs[0], req.Secret, req.CACertHash, req.Manager)
+    n, err := c.startNewNode(false, req.ListenAddr, req.RemoteAddrs[0], req.Secret, req.CACertHash, req.Manager)
     if err != nil {
         c.Unlock()
         return err
@@ -367,28 +358,41 @@ func (c *Cluster) Join(req types.JoinRequest) error {
         certificateRequested = nil
     case <-time.After(swarmConnectTimeout):
         // attempt to connect will continue in background, also reconnecting
-        go c.reconnectOnFailure(ctx)
+        go c.reconnectOnFailure(n)
         return ErrSwarmJoinTimeoutReached
     case <-n.Ready():
-        go c.reconnectOnFailure(ctx)
+        go c.reconnectOnFailure(n)
         return nil
-    case <-ctx.Done():
+    case <-n.done:
         c.RLock()
         defer c.RUnlock()
-        if c.err != nil {
-            return c.err
-        }
-        return ctx.Err()
+        return c.err
     }
 }

-func (c *Cluster) cancelReconnect() {
+// stopNode is a helper that stops the active c.node and waits until it has
+// shut down. Call while keeping the cluster lock.
+func (c *Cluster) stopNode() error {
+    if c.node == nil {
+        return nil
+    }
     c.stop = true
     if c.cancelDelay != nil {
         c.cancelDelay()
         c.cancelDelay = nil
     }
+    node := c.node
+    ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+    defer cancel()
+    // TODO: can't hold lock on stop because it calls back to network
+    c.Unlock()
+    defer c.Lock()
+    if err := node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
+        return err
+    }
+    <-node.done
+    return nil
 }

 // Leave shuts down Cluster and removes current state.
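The New, Init and Join hunks above all wait on the same three-way select: succeed when the node reports Ready, return the stored error if the node goroutine exits first (n.done), or give up after swarmConnectTimeout while the attempt keeps running in the background. A minimal, self-contained sketch of that pattern (illustrative names, not code from the patch):

package main

import (
    "errors"
    "fmt"
    "time"
)

// waitForNode shows the three-way wait: success on ready, the stored error if
// the background goroutine exits first, or a timeout while work continues.
func waitForNode(ready, done <-chan struct{}, storedErr error, timeout time.Duration) error {
    select {
    case <-ready:
        return nil
    case <-done:
        return storedErr // like returning c.err when <-n.done fires
    case <-time.After(timeout):
        return errors.New("timeout reached; attempt continues in background")
    }
}

func main() {
    ready := make(chan struct{})
    done := make(chan struct{})
    go func() {
        time.Sleep(10 * time.Millisecond)
        close(ready) // the node became ready before the deadline
    }()
    fmt.Println(waitForNode(ready, done, nil, time.Second)) // prints <nil>
}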
@@ -422,14 +426,11 @@ func (c *Cluster) Leave(force bool) error {
         c.Unlock()
         return fmt.Errorf(msg)
     }
-    c.cancelReconnect()
+    if err := c.stopNode(); err != nil {
+        c.Unlock()
+        return err
+    }
     c.Unlock()
-
-    ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
-    defer cancel()
-    if err := node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
-        return err
-    }

     if nodeID := node.NodeID(); nodeID != "" {
         for _, id := range c.config.Backend.ListContainersForNode(nodeID) {
             if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil {
@@ -437,11 +438,6 @@ func (c *Cluster) Leave(force bool) error {
             }
         }
     }
-    c.Lock()
-    defer c.Unlock()
-    c.node = nil
-    c.conn = nil
-    c.ready = false
     c.configEvent <- struct{}{}
     // todo: cleanup optional?
     if err := c.clearState(); err != nil {
@@ -531,7 +527,7 @@ func (c *Cluster) IsManager() bool {
 func (c *Cluster) IsAgent() bool {
     c.RLock()
     defer c.RUnlock()
-    return c.ready
+    return c.node != nil && c.ready
 }

 // GetListenAddress returns the listening address for current manager's
@@ -539,7 +535,7 @@ func (c *Cluster) IsAgent() bool {
 func (c *Cluster) GetListenAddress() string {
     c.RLock()
     defer c.RUnlock()
-    if c.conn != nil {
+    if c.isActiveManager() {
         return c.listenAddr
     }
     return ""
@@ -594,7 +590,6 @@ func (c *Cluster) Info() types.Info {
     if c.err != nil {
         info.Error = c.err.Error()
     }
-
     if c.isActiveManager() {
         info.ControlAvailable = true
         if r, err := c.client.ListNodes(c.getRequestContext(), &swarmapi.ListNodesRequest{}); err == nil {
@@ -623,7 +618,7 @@ func (c *Cluster) Info() types.Info {
 // isActiveManager should not be called without a read lock
 func (c *Cluster) isActiveManager() bool {
-    return c.conn != nil
+    return c.node != nil && c.conn != nil
 }

 // errNoManager returns error describing why manager commands can't be used.
@@ -1023,7 +1018,7 @@ func (c *Cluster) Cleanup() {
         c.Unlock()
         return
     }
-
+    defer c.Unlock()
     if c.isActiveManager() {
         active, reachable, unreachable, err := c.managerStats()
         if err == nil {
@@ -1033,18 +1028,7 @@
             }
         }
     }
-    c.cancelReconnect()
-    c.Unlock()
-
-    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-    defer cancel()
-    if err := node.Stop(ctx); err != nil {
-        logrus.Errorf("error cleaning up cluster: %v", err)
-    }
-    c.Lock()
-    c.node = nil
-    c.ready = false
-    c.conn = nil
-    c.Unlock()
+    c.stopNode()
 }

 func (c *Cluster) managerStats() (current bool, reachable int, unreachable int, err error) {
@@ -1139,14 +1123,14 @@ func validateAddr(addr string) (string, error) {
     return strings.TrimPrefix(newaddr, "tcp://"), nil
 }

-func errSwarmExists(node *swarmagent.Node) error {
+func errSwarmExists(node *node) error {
     if node.NodeMembership() != swarmapi.NodeMembershipAccepted {
         return ErrPendingSwarmExists
     }
     return ErrSwarmExists
 }

-func initClusterSpec(node *swarmagent.Node, spec types.Spec) error {
+func initClusterSpec(node *node, spec types.Spec) error {
     ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
     for conn := range node.ListenControlSocket(ctx) {
         if ctx.Err() != nil {
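errSwarmExists and initClusterSpec now take the wrapper *node instead of *swarmagent.Node; because node embeds *swarmagent.Node, promoted methods such as NodeMembership and ListenControlSocket keep working without touching the call bodies. A self-contained sketch of that embedding trick (agentNode is an illustrative stand-in, not the real type):

package main

import "fmt"

// agentNode stands in for swarmagent.Node.
type agentNode struct{ id string }

func (a *agentNode) NodeID() string { return a.id }

// node embeds the agent type, so its methods are promoted and callers that
// switch from *agentNode to *node need no changes inside the function body.
type node struct {
    *agentNode
    done  chan struct{}
    ready bool
}

// describe plays the role of errSwarmExists/initClusterSpec: it now takes the
// wrapper but still calls the promoted method.
func describe(n *node) string {
    return n.NodeID()
}

func main() {
    n := &node{agentNode: &agentNode{id: "abc123"}, done: make(chan struct{})}
    fmt.Println(describe(n))
}

The hunks that follow move from the cluster code to the integration-test harness and the swarm API tests.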


@@ -8,6 +8,7 @@ import (
     "github.com/docker/docker/cliconfig"
     "github.com/docker/docker/pkg/reexec"
+    "github.com/docker/engine-api/types/swarm"
     "github.com/go-check/check"
 )
@@ -213,12 +214,15 @@ func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *SwarmDaemon {
     if joinSwarm == true {
         if len(s.daemons) > 0 {
-            c.Assert(d.Join(s.daemons[0].listenAddr, "", "", manager), check.IsNil)
+            c.Assert(d.Join(swarm.JoinRequest{
+                RemoteAddrs: []string{s.daemons[0].listenAddr},
+                Manager:     manager}), check.IsNil)
         } else {
-            aa := make(map[string]bool)
-            aa["worker"] = true
-            aa["manager"] = true
-            c.Assert(d.Init(aa, ""), check.IsNil)
+            c.Assert(d.Init(swarm.InitRequest{
+                Spec: swarm.Spec{
+                    AcceptancePolicy: autoAcceptPolicy,
+                },
+            }), check.IsNil)
         }
     }


@@ -21,22 +21,18 @@ type SwarmDaemon struct {
     listenAddr string
 }

+// default policy in tests is allow-all
+var autoAcceptPolicy = swarm.AcceptancePolicy{
+    Policies: []swarm.Policy{
+        {Role: swarm.NodeRoleWorker, Autoaccept: true},
+        {Role: swarm.NodeRoleManager, Autoaccept: true},
+    },
+}
+
 // Init initializes a new swarm cluster.
-func (d *SwarmDaemon) Init(autoAccept map[string]bool, secret string) error {
-    req := swarm.InitRequest{
-        ListenAddr: d.listenAddr,
-    }
-    for _, role := range []swarm.NodeRole{swarm.NodeRoleManager, swarm.NodeRoleWorker} {
-        policy := swarm.Policy{
-            Role:       role,
-            Autoaccept: autoAccept[strings.ToLower(string(role))],
-        }
-        if secret != "" {
-            policy.Secret = &secret
-        }
-        req.Spec.AcceptancePolicy.Policies = append(req.Spec.AcceptancePolicy.Policies, policy)
+func (d *SwarmDaemon) Init(req swarm.InitRequest) error {
+    if req.ListenAddr == "" {
+        req.ListenAddr = d.listenAddr
     }
     status, out, err := d.SockRequest("POST", "/swarm/init", req)
     if status != http.StatusOK {
@@ -53,17 +49,10 @@ func (d *SwarmDaemon) Init(autoAccept map[string]bool, secret string) error {
     return nil
 }

-// Join joins a current daemon with existing cluster.
-func (d *SwarmDaemon) Join(remoteAddr, secret, cahash string, manager bool) error {
-    req := swarm.JoinRequest{
-        ListenAddr:  d.listenAddr,
-        RemoteAddrs: []string{remoteAddr},
-        Manager:     manager,
-        CACertHash:  cahash,
-    }
-    if secret != "" {
-        req.Secret = secret
+// Join joins a daemon to an existing cluster.
+func (d *SwarmDaemon) Join(req swarm.JoinRequest) error {
+    if req.ListenAddr == "" {
+        req.ListenAddr = d.listenAddr
     }
     status, out, err := d.SockRequest("POST", "/swarm/join", req)
     if status != http.StatusOK {
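With these helpers, tests build the request structs themselves, and an empty ListenAddr falls back to the daemon's listenAddr. A sketch of how a test drives them (not part of the patch; the test name is made up and it assumes the DockerSwarmSuite/SwarmDaemon harness and the engine-api swarm types from these files), in the same shape as the API test hunks that follow:

func (s *DockerSwarmSuite) TestSketchInitAndJoin(c *check.C) {
    d1 := s.AddDaemon(c, false, false)
    c.Assert(d1.Init(swarm.InitRequest{
        Spec: swarm.Spec{AcceptancePolicy: autoAcceptPolicy},
    }), check.IsNil) // ListenAddr left empty, so it defaults to d1.listenAddr

    d2 := s.AddDaemon(c, false, false)
    c.Assert(d2.Join(swarm.JoinRequest{
        RemoteAddrs: []string{d1.listenAddr}, // workers are auto-accepted by the policy
    }), check.IsNil)
}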


@@ -38,7 +38,7 @@ func (s *DockerSwarmSuite) TestApiSwarmInit(c *check.C) {
     c.Assert(info.ControlAvailable, checker.Equals, false)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

-    c.Assert(d2.Join(d1.listenAddr, "", "", false), checker.IsNil)
+    c.Assert(d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)

     info, err = d2.info()
     c.Assert(err, checker.IsNil)
@@ -76,10 +76,19 @@ func (s *DockerSwarmSuite) TestApiSwarmManualAcceptanceSecret(c *check.C) {
 func (s *DockerSwarmSuite) testAPISwarmManualAcceptance(c *check.C, secret string) {
     d1 := s.AddDaemon(c, false, false)
-    c.Assert(d1.Init(map[string]bool{}, secret), checker.IsNil)
+    c.Assert(d1.Init(swarm.InitRequest{
+        Spec: swarm.Spec{
+            AcceptancePolicy: swarm.AcceptancePolicy{
+                Policies: []swarm.Policy{
+                    {Role: swarm.NodeRoleWorker, Secret: &secret},
+                    {Role: swarm.NodeRoleManager, Secret: &secret},
+                },
+            },
+        },
+    }), checker.IsNil)
     d2 := s.AddDaemon(c, false, false)
-    err := d2.Join(d1.listenAddr, "", "", false)
+    err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}})
     c.Assert(err, checker.NotNil)
     if secret == "" {
         c.Assert(err.Error(), checker.Contains, "needs to be accepted")
@@ -97,7 +106,7 @@ func (s *DockerSwarmSuite) testAPISwarmManualAcceptance(c *check.C, secret string) {
         c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
     }
     d3 := s.AddDaemon(c, false, false)
-    c.Assert(d3.Join(d1.listenAddr, secret, "", false), checker.NotNil)
+    c.Assert(d3.Join(swarm.JoinRequest{Secret: secret, RemoteAddrs: []string{d1.listenAddr}}), checker.NotNil)
     info, err := d3.info()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
@@ -120,26 +129,34 @@ func (s *DockerSwarmSuite) testAPISwarmManualAcceptance(c *check.C, secret string) {
 func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
     d1 := s.AddDaemon(c, false, false)
-    aa := make(map[string]bool)
-    aa["worker"] = true
-    c.Assert(d1.Init(aa, "foobar"), checker.IsNil)
+    secret := "foobar"
+    c.Assert(d1.Init(swarm.InitRequest{
+        Spec: swarm.Spec{
+            AcceptancePolicy: swarm.AcceptancePolicy{
+                Policies: []swarm.Policy{
+                    {Role: swarm.NodeRoleWorker, Autoaccept: true, Secret: &secret},
+                    {Role: swarm.NodeRoleManager, Secret: &secret},
+                },
+            },
+        },
+    }), checker.IsNil)
     d2 := s.AddDaemon(c, false, false)

-    err := d2.Join(d1.listenAddr, "", "", false)
+    err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}})
     c.Assert(err, checker.NotNil)
     c.Assert(err.Error(), checker.Contains, "secret token is necessary")
     info, err := d2.info()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

-    err = d2.Join(d1.listenAddr, "foobaz", "", false)
+    err = d2.Join(swarm.JoinRequest{Secret: "foobaz", RemoteAddrs: []string{d1.listenAddr}})
     c.Assert(err, checker.NotNil)
     c.Assert(err.Error(), checker.Contains, "secret token is necessary")
     info, err = d2.info()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

-    c.Assert(d2.Join(d1.listenAddr, "foobar", "", false), checker.IsNil)
+    c.Assert(d2.Join(swarm.JoinRequest{Secret: "foobar", RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
     info, err = d2.info()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
@@ -156,14 +173,14 @@ func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
         }
     })

-    err = d2.Join(d1.listenAddr, "foobar", "", false)
+    err = d2.Join(swarm.JoinRequest{Secret: "foobar", RemoteAddrs: []string{d1.listenAddr}})
     c.Assert(err, checker.NotNil)
     c.Assert(err.Error(), checker.Contains, "secret token is necessary")
     info, err = d2.info()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

-    c.Assert(d2.Join(d1.listenAddr, "foobaz", "", false), checker.IsNil)
+    c.Assert(d2.Join(swarm.JoinRequest{Secret: "foobaz", RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
     info, err = d2.info()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
@@ -182,14 +199,14 @@ func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
         }
     })

-    err = d2.Join(d1.listenAddr, "", "", false)
+    err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}})
     c.Assert(err, checker.NotNil)
     c.Assert(err.Error(), checker.Contains, "secret token is necessary")
     info, err = d2.info()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

-    c.Assert(d2.Join(d1.listenAddr, "foobaz", "", false), checker.IsNil)
+    c.Assert(d2.Join(swarm.JoinRequest{Secret: "foobaz", RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
     info, err = d2.info()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
@@ -206,7 +223,7 @@ func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
         }
     })

-    c.Assert(d2.Join(d1.listenAddr, "", "", false), checker.IsNil)
+    c.Assert(d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
     info, err = d2.info()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
@@ -220,17 +237,26 @@ func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
 func (s *DockerSwarmSuite) TestApiSwarmCAHash(c *check.C) {
     d1 := s.AddDaemon(c, true, true)
     d2 := s.AddDaemon(c, false, false)
-    err := d2.Join(d1.listenAddr, "", "foobar", false)
+    err := d2.Join(swarm.JoinRequest{CACertHash: "foobar", RemoteAddrs: []string{d1.listenAddr}})
     c.Assert(err, checker.NotNil)
     c.Assert(err.Error(), checker.Contains, "invalid checksum digest format")

     c.Assert(len(d1.CACertHash), checker.GreaterThan, 0)
-    c.Assert(d2.Join(d1.listenAddr, "", d1.CACertHash, false), checker.IsNil)
+    c.Assert(d2.Join(swarm.JoinRequest{CACertHash: d1.CACertHash, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
 }

 func (s *DockerSwarmSuite) TestApiSwarmPromoteDemote(c *check.C) {
     d1 := s.AddDaemon(c, false, false)
-    c.Assert(d1.Init(map[string]bool{"worker": true}, ""), checker.IsNil)
+    c.Assert(d1.Init(swarm.InitRequest{
+        Spec: swarm.Spec{
+            AcceptancePolicy: swarm.AcceptancePolicy{
+                Policies: []swarm.Policy{
+                    {Role: swarm.NodeRoleWorker, Autoaccept: true},
+                    {Role: swarm.NodeRoleManager},
+                },
+            },
+        },
+    }), checker.IsNil)
     d2 := s.AddDaemon(c, true, false)

     info, err := d2.info()
@@ -269,7 +295,7 @@ func (s *DockerSwarmSuite) TestApiSwarmPromoteDemote(c *check.C) {
             break
         }
         if i > 100 {
-            c.Errorf("node did not turn into manager")
+            c.Errorf("node did not turn into worker")
         } else {
             break
         }
@@ -561,7 +587,9 @@ func (s *DockerSwarmSuite) TestApiSwarmLeaveOnPendingJoin(c *check.C) {
     c.Assert(err, checker.IsNil)
     id = strings.TrimSpace(id)

-    go d2.Join("nosuchhost:1234", "", "", false) // will block on pending state
+    go d2.Join(swarm.JoinRequest{
+        RemoteAddrs: []string{"nosuchhost:1234"},
+    }) // will block on pending state

     for i := 0; ; i++ {
         info, err := d2.info()
@@ -587,7 +615,9 @@ func (s *DockerSwarmSuite) TestApiSwarmLeaveOnPendingJoin(c *check.C) {
 // #23705
 func (s *DockerSwarmSuite) TestApiSwarmRestoreOnPendingJoin(c *check.C) {
     d := s.AddDaemon(c, false, false)
-    go d.Join("nosuchhost:1234", "", "", false) // will block on pending state
+    go d.Join(swarm.JoinRequest{
+        RemoteAddrs: []string{"nosuchhost:1234"},
+    }) // will block on pending state

     for i := 0; ; i++ {
         info, err := d.info()
@@ -680,6 +710,39 @@ func (s *DockerSwarmSuite) TestApiSwarmInvalidAddress(c *check.C) {
     c.Assert(status, checker.Equals, http.StatusInternalServerError)
 }

+func (s *DockerSwarmSuite) TestApiSwarmForceNewCluster(c *check.C) {
+    d1 := s.AddDaemon(c, true, true)
+    d2 := s.AddDaemon(c, true, true)
+
+    instances := 2
+    id := d1.createService(c, simpleTestService, setInstances(instances))
+    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances)
+
+    c.Assert(d2.Stop(), checker.IsNil)
+
+    time.Sleep(5 * time.Second)
+
+    c.Assert(d1.Init(swarm.InitRequest{
+        ForceNewCluster: true,
+        Spec: swarm.Spec{
+            AcceptancePolicy: autoAcceptPolicy,
+        },
+    }), checker.IsNil)
+
+    waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances)
+
+    d3 := s.AddDaemon(c, true, true)
+    info, err := d3.info()
+    c.Assert(err, checker.IsNil)
+    c.Assert(info.ControlAvailable, checker.Equals, true)
+    c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+
+    instances = 4
+    d3.updateService(c, d3.getService(c, id), setInstances(instances))
+    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances)
+}
+
 func simpleTestService(s *swarm.Service) {
     var ureplicas uint64
     ureplicas = 1