2016-06-13 22:52:49 -04:00
package cluster
import (
2016-11-08 21:03:47 -05:00
"crypto/x509"
2016-11-08 12:32:29 -05:00
"encoding/base64"
2016-06-13 22:52:49 -04:00
"encoding/json"
"fmt"
2016-10-26 04:17:31 -04:00
"io"
2016-06-13 22:52:49 -04:00
"io/ioutil"
2016-06-30 21:07:35 -04:00
"net"
2016-06-13 22:52:49 -04:00
"os"
"path/filepath"
2016-10-19 19:09:54 -04:00
"runtime"
2016-06-13 22:52:49 -04:00
"strings"
"sync"
"time"
"github.com/Sirupsen/logrus"
2016-11-14 19:12:24 -05:00
distreference "github.com/docker/distribution/reference"
2016-10-21 21:07:55 -04:00
apierrors "github.com/docker/docker/api/errors"
2016-09-06 14:18:12 -04:00
apitypes "github.com/docker/docker/api/types"
2016-10-26 04:17:31 -04:00
"github.com/docker/docker/api/types/backend"
2016-09-06 14:18:12 -04:00
"github.com/docker/docker/api/types/filters"
2016-08-23 19:50:15 -04:00
"github.com/docker/docker/api/types/network"
2016-09-06 14:18:12 -04:00
types "github.com/docker/docker/api/types/swarm"
2016-06-13 22:52:49 -04:00
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
"github.com/docker/docker/daemon/cluster/executor/container"
2016-10-26 04:17:31 -04:00
"github.com/docker/docker/daemon/logger"
2016-06-21 17:27:04 -04:00
"github.com/docker/docker/opts"
2016-06-13 22:52:49 -04:00
"github.com/docker/docker/pkg/ioutils"
2016-08-30 17:17:32 -04:00
"github.com/docker/docker/pkg/signal"
2016-10-26 04:17:31 -04:00
"github.com/docker/docker/pkg/stdcopy"
2016-11-08 12:32:29 -05:00
"github.com/docker/docker/reference"
2016-06-13 22:52:49 -04:00
"github.com/docker/docker/runconfig"
swarmapi "github.com/docker/swarmkit/api"
2016-10-27 21:50:49 -04:00
"github.com/docker/swarmkit/manager/encryption"
2016-10-20 14:26:04 -04:00
swarmnode "github.com/docker/swarmkit/node"
2016-10-26 04:17:31 -04:00
"github.com/docker/swarmkit/protobuf/ptypes"
2016-10-21 21:07:55 -04:00
"github.com/pkg/errors"
2016-06-13 22:52:49 -04:00
"golang.org/x/net/context"
2016-10-21 21:07:55 -04:00
"google.golang.org/grpc"
2016-06-13 22:52:49 -04:00
)
const (
	// On-disk layout under the daemon root.
	swarmDirName  = "swarm"
	controlSocket = "control.sock"
	stateFile     = "docker-state.json"

	// defaultAddr is the default listen address for swarm mode.
	defaultAddr = "0.0.0.0:2377"

	// Timeouts for joining/connecting and for individual control requests.
	swarmConnectTimeout = 20 * time.Second
	swarmRequestTimeout = 20 * time.Second

	// Backoff bounds used by reconnectOnFailure.
	initialReconnectDelay = 100 * time.Millisecond
	maxReconnectDelay     = 30 * time.Second

	// contextPrefix is the label namespace used for swarm metadata.
	contextPrefix = "com.docker.swarm"
)
// ErrNoSwarm is returned on leaving a cluster that was never initialized
2016-07-25 19:24:32 -04:00
var ErrNoSwarm = fmt . Errorf ( "This node is not part of a swarm" )
2016-06-13 22:52:49 -04:00
// ErrSwarmExists is returned on initialize or join request for a cluster that has already been activated
2016-07-25 19:24:32 -04:00
var ErrSwarmExists = fmt . Errorf ( "This node is already part of a swarm. Use \"docker swarm leave\" to leave this swarm and join another one." )
2016-06-16 12:42:22 -04:00
2016-06-13 22:52:49 -04:00
// ErrSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached.
2016-07-25 19:24:32 -04:00
var ErrSwarmJoinTimeoutReached = fmt . Errorf ( "Timeout was reached before node was joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node." )
2016-06-13 22:52:49 -04:00
2016-10-21 21:07:55 -04:00
// ErrSwarmLocked is returned if the swarm is encrypted and needs a key to unlock it.
var ErrSwarmLocked = fmt . Errorf ( "Swarm is encrypted and needs to be unlocked before it can be used. Please use \"docker swarm unlock\" to unlock it." )
2016-11-17 01:13:38 -05:00
// ErrSwarmCertificatesExpired is returned if docker was not started for the whole validity period and they had no chance to renew automatically.
2016-11-08 21:03:47 -05:00
var ErrSwarmCertificatesExpired = errors . New ( "Swarm certificates have expired. To replace them, leave the swarm and join again." )
2016-06-30 21:07:35 -04:00
// NetworkSubnetsProvider exposes functions for retrieving the subnets
// of networks managed by Docker, so they can be filtered.
type NetworkSubnetsProvider interface {
	// V4Subnets returns the IPv4 subnets of managed networks.
	V4Subnets() []net.IPNet
	// V6Subnets returns the IPv6 subnets of managed networks.
	V6Subnets() []net.IPNet
}
// Config provides values for Cluster.
type Config struct {
2016-06-30 21:07:35 -04:00
Root string
Name string
Backend executorpkg . Backend
NetworkSubnetsProvider NetworkSubnetsProvider
// DefaultAdvertiseAddr is the default host/IP or network interface to use
// if no AdvertiseAddr value is specified.
DefaultAdvertiseAddr string
2016-08-19 16:06:28 -04:00
// path to store runtime state, such as the swarm control socket
RuntimeRoot string
2016-06-13 22:52:49 -04:00
}
2016-06-24 14:52:28 -04:00
// Cluster provides capabilities to participate in a cluster as a worker or a
// manager.
2016-06-13 22:52:49 -04:00
type Cluster struct {
sync . RWMutex
2016-06-20 19:35:33 -04:00
* node
2016-06-30 21:07:35 -04:00
root string
2016-08-19 16:06:28 -04:00
runtimeRoot string
2016-06-30 21:07:35 -04:00
config Config
configEvent chan struct { } // todo: make this array and goroutine safe
2016-10-21 16:31:45 -04:00
actualLocalAddr string // after resolution, not persisted
2016-06-30 21:07:35 -04:00
stop bool
err error
cancelDelay func ( )
2016-08-23 19:50:15 -04:00
attachers map [ string ] * attacher
2016-10-21 21:07:55 -04:00
locked bool
lastNodeConfig * nodeStartConfig
2016-08-23 19:50:15 -04:00
}
// attacher manages the in-memory attachment state of a container
// attachment to a global scope network managed by swarm manager. It
// helps in identifying the attachment ID via the taskID and the
// corresponding attachment configuration obtained from the manager.
type attacher struct {
2016-09-09 12:55:57 -04:00
taskID string
config * network . NetworkingConfig
attachWaitCh chan * network . NetworkingConfig
attachCompleteCh chan struct { }
detachWaitCh chan struct { }
2016-06-20 19:35:33 -04:00
}
type node struct {
2016-10-20 14:26:04 -04:00
* swarmnode . Node
2016-06-20 19:35:33 -04:00
done chan struct { }
ready bool
2016-06-13 22:52:49 -04:00
conn * grpc . ClientConn
client swarmapi . ControlClient
2016-10-26 04:17:31 -04:00
logs swarmapi . LogsClient
2016-06-13 22:52:49 -04:00
reconnectDelay time . Duration
2016-10-21 16:31:45 -04:00
config nodeStartConfig
}
// nodeStartConfig holds configuration needed to start a new node. Exported
// fields of this structure are saved to disk in json. Unexported fields
// contain data that shouldn't be persisted between daemon reloads.
type nodeStartConfig struct {
	// LocalAddr is this machine's local IP or hostname, if specified.
	LocalAddr string
	// RemoteAddr is the address that was given to "swarm join". It is used
	// to find LocalAddr if necessary.
	RemoteAddr string
	// ListenAddr is the address we bind to, including a port.
	ListenAddr string
	// AdvertiseAddr is the address other nodes should connect to,
	// including a port.
	AdvertiseAddr string

	joinAddr        string
	forceNewCluster bool
	joinToken       string
	lockKey         []byte
	autolock        bool
}
// New creates a new Cluster instance using provided config.
func New ( config Config ) ( * Cluster , error ) {
root := filepath . Join ( config . Root , swarmDirName )
if err := os . MkdirAll ( root , 0700 ) ; err != nil {
return nil , err
}
2016-08-19 16:06:28 -04:00
if config . RuntimeRoot == "" {
config . RuntimeRoot = root
}
if err := os . MkdirAll ( config . RuntimeRoot , 0700 ) ; err != nil {
return nil , err
}
2016-06-13 22:52:49 -04:00
c := & Cluster {
2016-06-20 19:35:33 -04:00
root : root ,
config : config ,
configEvent : make ( chan struct { } , 10 ) ,
2016-08-19 16:06:28 -04:00
runtimeRoot : config . RuntimeRoot ,
2016-08-23 19:50:15 -04:00
attachers : make ( map [ string ] * attacher ) ,
2016-06-13 22:52:49 -04:00
}
2016-10-21 16:31:45 -04:00
nodeConfig , err := c . loadState ( )
2016-06-13 22:52:49 -04:00
if err != nil {
if os . IsNotExist ( err ) {
return c , nil
}
return nil , err
}
2016-10-21 16:31:45 -04:00
n , err := c . startNewNode ( * nodeConfig )
2016-06-13 22:52:49 -04:00
if err != nil {
return nil , err
}
select {
case <- time . After ( swarmConnectTimeout ) :
2016-11-01 00:05:01 -04:00
logrus . Error ( "swarm component could not be started before timeout was reached" )
2016-06-16 01:41:43 -04:00
case <- n . Ready ( ) :
2016-06-20 19:35:33 -04:00
case <- n . done :
2016-10-27 21:50:49 -04:00
if errors . Cause ( c . err ) == ErrSwarmLocked {
return c , nil
}
2016-11-08 21:03:47 -05:00
if err , ok := errors . Cause ( c . err ) . ( x509 . CertificateInvalidError ) ; ok && err . Reason == x509 . Expired {
c . err = ErrSwarmCertificatesExpired
return c , nil
}
2016-06-20 19:35:33 -04:00
return nil , fmt . Errorf ( "swarm component could not be started: %v" , c . err )
2016-06-13 22:52:49 -04:00
}
2016-06-20 19:35:33 -04:00
go c . reconnectOnFailure ( n )
2016-06-13 22:52:49 -04:00
return c , nil
}
2016-10-21 16:31:45 -04:00
func ( c * Cluster ) loadState ( ) ( * nodeStartConfig , error ) {
2016-06-18 22:43:47 -04:00
dt , err := ioutil . ReadFile ( filepath . Join ( c . root , stateFile ) )
if err != nil {
return nil , err
}
// missing certificate means no actual state to restore from
if _ , err := os . Stat ( filepath . Join ( c . root , "certificates/swarm-node.crt" ) ) ; err != nil {
if os . IsNotExist ( err ) {
c . clearState ( )
}
return nil , err
}
2016-10-21 16:31:45 -04:00
var st nodeStartConfig
2016-06-18 22:43:47 -04:00
if err := json . Unmarshal ( dt , & st ) ; err != nil {
return nil , err
}
return & st , nil
}
2016-10-21 16:31:45 -04:00
func ( c * Cluster ) saveState ( config nodeStartConfig ) error {
dt , err := json . Marshal ( config )
2016-06-13 22:52:49 -04:00
if err != nil {
return err
}
return ioutils . AtomicWriteFile ( filepath . Join ( c . root , stateFile ) , dt , 0600 )
}
2016-06-20 19:35:33 -04:00
func ( c * Cluster ) reconnectOnFailure ( n * node ) {
2016-06-13 22:52:49 -04:00
for {
2016-06-20 19:35:33 -04:00
<- n . done
2016-06-13 22:52:49 -04:00
c . Lock ( )
if c . stop || c . node != nil {
c . Unlock ( )
return
}
2016-06-20 19:35:33 -04:00
n . reconnectDelay *= 2
if n . reconnectDelay > maxReconnectDelay {
n . reconnectDelay = maxReconnectDelay
2016-06-13 22:52:49 -04:00
}
2016-06-20 19:35:33 -04:00
logrus . Warnf ( "Restarting swarm in %.2f seconds" , n . reconnectDelay . Seconds ( ) )
delayCtx , cancel := context . WithTimeout ( context . Background ( ) , n . reconnectDelay )
2016-06-13 22:52:49 -04:00
c . cancelDelay = cancel
c . Unlock ( )
<- delayCtx . Done ( )
if delayCtx . Err ( ) != context . DeadlineExceeded {
return
}
c . Lock ( )
if c . node != nil {
c . Unlock ( )
return
}
var err error
2016-10-21 16:31:45 -04:00
config := n . config
config . RemoteAddr = c . getRemoteAddress ( )
config . joinAddr = config . RemoteAddr
n , err = c . startNewNode ( config )
2016-06-13 22:52:49 -04:00
if err != nil {
c . err = err
2016-06-20 19:35:33 -04:00
close ( n . done )
2016-06-13 22:52:49 -04:00
}
c . Unlock ( )
}
}
2016-10-21 16:31:45 -04:00
func ( c * Cluster ) startNewNode ( conf nodeStartConfig ) ( * node , error ) {
2016-06-14 12:13:53 -04:00
if err := c . config . Backend . IsSwarmCompatible ( ) ; err != nil {
2016-06-20 19:35:33 -04:00
return nil , err
2016-06-13 22:52:49 -04:00
}
2016-06-30 21:07:35 -04:00
2016-10-21 16:31:45 -04:00
actualLocalAddr := conf . LocalAddr
2016-06-30 21:07:35 -04:00
if actualLocalAddr == "" {
// If localAddr was not specified, resolve it automatically
// based on the route to joinAddr. localAddr can only be left
// empty on "join".
2016-10-21 16:31:45 -04:00
listenHost , _ , err := net . SplitHostPort ( conf . ListenAddr )
2016-06-30 21:07:35 -04:00
if err != nil {
return nil , fmt . Errorf ( "could not parse listen address: %v" , err )
}
listenAddrIP := net . ParseIP ( listenHost )
if listenAddrIP == nil || ! listenAddrIP . IsUnspecified ( ) {
actualLocalAddr = listenHost
} else {
2016-10-21 16:31:45 -04:00
if conf . RemoteAddr == "" {
2016-06-30 21:07:35 -04:00
// Should never happen except using swarms created by
// old versions that didn't save remoteAddr.
2016-10-21 16:31:45 -04:00
conf . RemoteAddr = "8.8.8.8:53"
2016-06-30 21:07:35 -04:00
}
2016-10-21 16:31:45 -04:00
conn , err := net . Dial ( "udp" , conf . RemoteAddr )
2016-06-30 21:07:35 -04:00
if err != nil {
return nil , fmt . Errorf ( "could not find local IP address: %v" , err )
}
localHostPort := conn . LocalAddr ( ) . String ( )
actualLocalAddr , _ , _ = net . SplitHostPort ( localHostPort )
conn . Close ( )
}
}
2016-10-19 19:09:54 -04:00
var control string
if runtime . GOOS == "windows" {
control = ` \\.\pipe\ ` + controlSocket
} else {
control = filepath . Join ( c . runtimeRoot , controlSocket )
}
2016-06-13 22:52:49 -04:00
c . node = nil
c . cancelDelay = nil
2016-06-20 19:35:33 -04:00
c . stop = false
2016-10-20 14:26:04 -04:00
n , err := swarmnode . New ( & swarmnode . Config {
2016-06-30 21:07:35 -04:00
Hostname : c . config . Name ,
2016-10-21 16:31:45 -04:00
ForceNewCluster : conf . forceNewCluster ,
2016-10-19 19:09:54 -04:00
ListenControlAPI : control ,
2016-10-21 16:31:45 -04:00
ListenRemoteAPI : conf . ListenAddr ,
AdvertiseRemoteAPI : conf . AdvertiseAddr ,
JoinAddr : conf . joinAddr ,
2016-06-30 21:07:35 -04:00
StateDir : c . root ,
2016-10-21 16:31:45 -04:00
JoinToken : conf . joinToken ,
2016-06-30 21:07:35 -04:00
Executor : container . NewExecutor ( c . config . Backend ) ,
HeartbeatTick : 1 ,
ElectionTick : 3 ,
2016-10-21 21:07:55 -04:00
UnlockKey : conf . lockKey ,
2016-10-27 21:50:49 -04:00
AutoLockManagers : conf . autolock ,
2016-06-13 22:52:49 -04:00
} )
2016-10-21 21:07:55 -04:00
2016-06-13 22:52:49 -04:00
if err != nil {
2016-06-20 19:35:33 -04:00
return nil , err
2016-06-13 22:52:49 -04:00
}
2016-06-20 19:35:33 -04:00
ctx := context . Background ( )
if err := n . Start ( ctx ) ; err != nil {
return nil , err
}
node := & node {
Node : n ,
done : make ( chan struct { } ) ,
reconnectDelay : initialReconnectDelay ,
2016-10-21 16:31:45 -04:00
config : conf ,
2016-06-13 22:52:49 -04:00
}
c . node = node
2016-06-30 21:07:35 -04:00
c . actualLocalAddr = actualLocalAddr // not saved
2016-10-21 16:31:45 -04:00
c . saveState ( conf )
2016-06-30 21:07:35 -04:00
2016-06-13 22:52:49 -04:00
c . config . Backend . SetClusterProvider ( c )
go func ( ) {
2016-10-27 21:50:49 -04:00
err := detectLockedError ( n . Err ( ctx ) )
2016-06-13 22:52:49 -04:00
if err != nil {
logrus . Errorf ( "cluster exited with error: %v" , err )
}
c . Lock ( )
c . node = nil
c . err = err
2016-10-27 21:50:49 -04:00
if errors . Cause ( err ) == ErrSwarmLocked {
c . locked = true
confClone := conf
c . lastNodeConfig = & confClone
}
2016-06-13 22:52:49 -04:00
c . Unlock ( )
2016-06-20 19:35:33 -04:00
close ( node . done )
2016-06-13 22:52:49 -04:00
} ( )
go func ( ) {
select {
2016-06-20 19:35:33 -04:00
case <- n . Ready ( ) :
2016-06-13 22:52:49 -04:00
c . Lock ( )
2016-06-20 19:35:33 -04:00
node . ready = true
2016-06-13 22:52:49 -04:00
c . err = nil
c . Unlock ( )
2016-06-20 19:35:33 -04:00
case <- ctx . Done ( ) :
2016-06-13 22:52:49 -04:00
}
c . configEvent <- struct { } { }
} ( )
go func ( ) {
2016-06-20 19:35:33 -04:00
for conn := range n . ListenControlSocket ( ctx ) {
2016-06-13 22:52:49 -04:00
c . Lock ( )
2016-06-20 19:35:33 -04:00
if node . conn != conn {
if conn == nil {
node . client = nil
2016-10-26 04:17:31 -04:00
node . logs = nil
2016-06-20 19:35:33 -04:00
} else {
node . client = swarmapi . NewControlClient ( conn )
2016-10-26 04:17:31 -04:00
node . logs = swarmapi . NewLogsClient ( conn )
2016-06-20 19:35:33 -04:00
}
2016-06-13 22:52:49 -04:00
}
2016-06-20 19:35:33 -04:00
node . conn = conn
2016-06-13 22:52:49 -04:00
c . Unlock ( )
c . configEvent <- struct { } { }
}
} ( )
2016-06-20 19:35:33 -04:00
return node , nil
2016-06-13 22:52:49 -04:00
}
// Init initializes new cluster from user provided request.
func ( c * Cluster ) Init ( req types . InitRequest ) ( string , error ) {
c . Lock ( )
2016-11-08 21:03:47 -05:00
if c . swarmExists ( ) {
2016-06-13 22:52:49 -04:00
if ! req . ForceNewCluster {
2016-06-20 19:35:33 -04:00
c . Unlock ( )
2016-07-20 14:15:08 -04:00
return "" , ErrSwarmExists
2016-06-13 22:52:49 -04:00
}
2016-06-20 19:35:33 -04:00
if err := c . stopNode ( ) ; err != nil {
c . Unlock ( )
2016-06-13 22:52:49 -04:00
return "" , err
}
}
2016-06-21 17:27:04 -04:00
if err := validateAndSanitizeInitRequest ( & req ) ; err != nil {
c . Unlock ( )
return "" , err
}
2016-06-30 21:07:35 -04:00
listenHost , listenPort , err := resolveListenAddr ( req . ListenAddr )
if err != nil {
c . Unlock ( )
return "" , err
}
advertiseHost , advertisePort , err := c . resolveAdvertiseAddr ( req . AdvertiseAddr , listenPort )
if err != nil {
c . Unlock ( )
return "" , err
}
localAddr := listenHost
2016-11-10 16:45:32 -05:00
// If the local address is undetermined, the advertise address
// will be used as local address, if it belongs to this system.
// If the advertise address is not local, then we try to find
// a system address to use as local address. If this fails,
// we give up and ask user to pass the listen address.
if net . ParseIP ( localAddr ) . IsUnspecified ( ) {
2016-06-30 21:07:35 -04:00
advertiseIP := net . ParseIP ( advertiseHost )
found := false
2016-11-10 16:45:32 -05:00
for _ , systemIP := range listSystemIPs ( ) {
2016-06-30 21:07:35 -04:00
if systemIP . Equal ( advertiseIP ) {
2016-11-10 16:45:32 -05:00
localAddr = advertiseIP . String ( )
2016-06-30 21:07:35 -04:00
found = true
break
}
}
2016-11-10 16:45:32 -05:00
2016-06-30 21:07:35 -04:00
if ! found {
2016-11-10 16:45:32 -05:00
ip , err := c . resolveSystemAddr ( )
if err != nil {
c . Unlock ( )
logrus . Warnf ( "Could not find a local address: %v" , err )
return "" , errMustSpecifyListenAddr
}
localAddr = ip . String ( )
2016-06-30 21:07:35 -04:00
}
}
2016-06-13 22:52:49 -04:00
// todo: check current state existing
2016-10-21 16:31:45 -04:00
n , err := c . startNewNode ( nodeStartConfig {
forceNewCluster : req . ForceNewCluster ,
2016-10-27 21:50:49 -04:00
autolock : req . AutoLockManagers ,
2016-10-21 16:31:45 -04:00
LocalAddr : localAddr ,
ListenAddr : net . JoinHostPort ( listenHost , listenPort ) ,
AdvertiseAddr : net . JoinHostPort ( advertiseHost , advertisePort ) ,
} )
2016-06-13 22:52:49 -04:00
if err != nil {
c . Unlock ( )
return "" , err
}
c . Unlock ( )
select {
2016-06-16 01:41:43 -04:00
case <- n . Ready ( ) :
2016-06-21 17:27:04 -04:00
if err := initClusterSpec ( n , req . Spec ) ; err != nil {
2016-06-13 22:52:49 -04:00
return "" , err
}
2016-06-20 19:35:33 -04:00
go c . reconnectOnFailure ( n )
2016-06-13 22:52:49 -04:00
return n . NodeID ( ) , nil
2016-06-20 19:35:33 -04:00
case <- n . done :
2016-06-13 22:52:49 -04:00
c . RLock ( )
defer c . RUnlock ( )
2016-06-20 19:35:33 -04:00
if ! req . ForceNewCluster { // if failure on first attempt don't keep state
if err := c . clearState ( ) ; err != nil {
return "" , err
2016-06-13 22:52:49 -04:00
}
}
2016-06-20 19:35:33 -04:00
return "" , c . err
2016-06-13 22:52:49 -04:00
}
}
// Join makes current Cluster part of an existing swarm cluster.
func ( c * Cluster ) Join ( req types . JoinRequest ) error {
c . Lock ( )
2016-11-08 21:03:47 -05:00
if c . swarmExists ( ) {
2016-06-13 22:52:49 -04:00
c . Unlock ( )
2016-07-20 14:15:08 -04:00
return ErrSwarmExists
2016-06-13 22:52:49 -04:00
}
2016-06-21 17:27:04 -04:00
if err := validateAndSanitizeJoinRequest ( & req ) ; err != nil {
c . Unlock ( )
return err
2016-06-13 22:52:49 -04:00
}
2016-06-30 21:07:35 -04:00
listenHost , listenPort , err := resolveListenAddr ( req . ListenAddr )
if err != nil {
c . Unlock ( )
return err
}
var advertiseAddr string
2016-08-31 14:44:32 -04:00
if req . AdvertiseAddr != "" {
advertiseHost , advertisePort , err := c . resolveAdvertiseAddr ( req . AdvertiseAddr , listenPort )
// For joining, we don't need to provide an advertise address,
// since the remote side can detect it.
if err == nil {
advertiseAddr = net . JoinHostPort ( advertiseHost , advertisePort )
}
2016-06-30 21:07:35 -04:00
}
2016-06-21 17:27:04 -04:00
// todo: check current state existing
2016-10-21 16:31:45 -04:00
n , err := c . startNewNode ( nodeStartConfig {
RemoteAddr : req . RemoteAddrs [ 0 ] ,
ListenAddr : net . JoinHostPort ( listenHost , listenPort ) ,
AdvertiseAddr : advertiseAddr ,
joinAddr : req . RemoteAddrs [ 0 ] ,
joinToken : req . JoinToken ,
} )
2016-06-13 22:52:49 -04:00
if err != nil {
c . Unlock ( )
return err
}
c . Unlock ( )
2016-07-20 14:15:08 -04:00
select {
case <- time . After ( swarmConnectTimeout ) :
2016-10-03 16:38:01 -04:00
// attempt to connect will continue in background, but reconnect only if it didn't fail
go func ( ) {
select {
case <- n . Ready ( ) :
c . reconnectOnFailure ( n )
case <- n . done :
logrus . Errorf ( "failed to join the cluster: %+v" , c . err )
}
} ( )
2016-07-20 14:15:08 -04:00
return ErrSwarmJoinTimeoutReached
case <- n . Ready ( ) :
go c . reconnectOnFailure ( n )
return nil
case <- n . done :
c . RLock ( )
defer c . RUnlock ( )
return c . err
2016-06-13 22:52:49 -04:00
}
}
2016-10-21 21:07:55 -04:00
// GetUnlockKey returns the unlock key for the swarm.
func ( c * Cluster ) GetUnlockKey ( ) ( string , error ) {
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
return "" , c . errNoManager ( )
}
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
client := swarmapi . NewCAClient ( c . conn )
r , err := client . GetUnlockKey ( ctx , & swarmapi . GetUnlockKeyRequest { } )
if err != nil {
return "" , err
}
2016-10-28 19:35:49 -04:00
if len ( r . UnlockKey ) == 0 {
// no key
return "" , nil
}
2016-10-21 21:07:55 -04:00
return encryption . HumanReadableKey ( r . UnlockKey ) , nil
}
// UnlockSwarm provides a key to decrypt data that is encrypted at rest.
func ( c * Cluster ) UnlockSwarm ( req types . UnlockRequest ) error {
2016-10-27 21:50:49 -04:00
key , err := encryption . ParseHumanReadableKey ( req . UnlockKey )
if err != nil {
return err
2016-10-21 21:07:55 -04:00
}
c . Lock ( )
if c . node != nil || c . locked != true {
c . Unlock ( )
return errors . New ( "swarm is not locked" )
}
config := * c . lastNodeConfig
2016-10-27 21:50:49 -04:00
config . lockKey = key
2016-10-21 21:07:55 -04:00
n , err := c . startNewNode ( config )
if err != nil {
c . Unlock ( )
return err
}
c . Unlock ( )
select {
case <- n . Ready ( ) :
case <- n . done :
2016-10-31 18:02:34 -04:00
if errors . Cause ( c . err ) == ErrSwarmLocked {
return errors . New ( "swarm could not be unlocked: invalid key provided" )
}
2016-10-21 21:07:55 -04:00
return fmt . Errorf ( "swarm component could not be started: %v" , c . err )
}
go c . reconnectOnFailure ( n )
return nil
}
2016-06-20 19:35:33 -04:00
// stopNode is a helper that stops the active c.node and waits until it has
// shut down. Call while keeping the cluster lock.
func ( c * Cluster ) stopNode ( ) error {
if c . node == nil {
return nil
}
2016-06-13 22:52:49 -04:00
c . stop = true
if c . cancelDelay != nil {
c . cancelDelay ( )
c . cancelDelay = nil
}
2016-06-20 19:35:33 -04:00
node := c . node
ctx , cancel := context . WithTimeout ( context . Background ( ) , 15 * time . Second )
defer cancel ( )
// TODO: can't hold lock on stop because it calls back to network
c . Unlock ( )
defer c . Lock ( )
if err := node . Stop ( ctx ) ; err != nil && ! strings . Contains ( err . Error ( ) , "context canceled" ) {
return err
}
<- node . done
return nil
2016-06-13 22:52:49 -04:00
}
2016-08-19 16:49:58 -04:00
// removingManagerCausesLossOfQuorum reports whether removing one manager
// from the given reachable/unreachable counts would break the Raft quorum.
func removingManagerCausesLossOfQuorum(reachable, unreachable int) bool {
	// Equivalent to reachable-2 <= unreachable.
	return reachable <= unreachable+2
}
// isLastManager reports whether this node is the sole remaining manager.
func isLastManager(reachable, unreachable int) bool {
	return unreachable == 0 && reachable == 1
}
2016-06-13 22:52:49 -04:00
// Leave shuts down Cluster and removes current state.
func ( c * Cluster ) Leave ( force bool ) error {
c . Lock ( )
node := c . node
if node == nil {
2016-10-21 21:07:55 -04:00
if c . locked {
c . locked = false
c . lastNodeConfig = nil
c . Unlock ( )
2016-11-08 21:03:47 -05:00
} else if c . err == ErrSwarmCertificatesExpired {
c . err = nil
c . Unlock ( )
2016-10-21 21:07:55 -04:00
} else {
c . Unlock ( )
return ErrNoSwarm
}
} else {
if node . Manager ( ) != nil && ! force {
msg := "You are attempting to leave the swarm on a node that is participating as a manager. "
if c . isActiveManager ( ) {
active , reachable , unreachable , err := c . managerStats ( )
if err == nil {
if active && removingManagerCausesLossOfQuorum ( reachable , unreachable ) {
if isLastManager ( reachable , unreachable ) {
msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. "
c . Unlock ( )
return fmt . Errorf ( msg )
}
msg += fmt . Sprintf ( "Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. " , reachable - 1 , reachable + unreachable )
2016-06-13 22:52:49 -04:00
}
}
2016-10-21 21:07:55 -04:00
} else {
msg += "Doing so may lose the consensus of your cluster. "
2016-06-13 22:52:49 -04:00
}
2016-10-21 21:07:55 -04:00
msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message."
c . Unlock ( )
return fmt . Errorf ( msg )
}
if err := c . stopNode ( ) ; err != nil {
logrus . Errorf ( "failed to shut down cluster node: %v" , err )
signal . DumpStacks ( "" )
c . Unlock ( )
2016-08-20 08:14:26 -04:00
return err
}
2016-10-21 21:07:55 -04:00
c . Unlock ( )
if nodeID := node . NodeID ( ) ; nodeID != "" {
nodeContainers , err := c . listContainerForNode ( nodeID )
if err != nil {
return err
}
for _ , id := range nodeContainers {
if err := c . config . Backend . ContainerRm ( id , & apitypes . ContainerRmConfig { ForceRemove : true } ) ; err != nil {
logrus . Errorf ( "error removing %v: %v" , id , err )
}
2016-06-16 19:08:15 -04:00
}
2016-06-13 22:52:49 -04:00
}
}
c . configEvent <- struct { } { }
// todo: cleanup optional?
if err := c . clearState ( ) ; err != nil {
return err
}
return nil
}
2016-08-20 08:14:26 -04:00
func ( c * Cluster ) listContainerForNode ( nodeID string ) ( [ ] string , error ) {
var ids [ ] string
filters := filters . NewArgs ( )
filters . Add ( "label" , fmt . Sprintf ( "com.docker.swarm.node.id=%s" , nodeID ) )
containers , err := c . config . Backend . Containers ( & apitypes . ContainerListOptions {
2016-11-01 10:01:16 -04:00
Filters : filters ,
2016-08-20 08:14:26 -04:00
} )
if err != nil {
return [ ] string { } , err
}
for _ , c := range containers {
ids = append ( ids , c . ID )
}
return ids , nil
}
2016-06-13 22:52:49 -04:00
func ( c * Cluster ) clearState ( ) error {
2016-06-18 22:43:47 -04:00
// todo: backup this data instead of removing?
2016-06-13 22:52:49 -04:00
if err := os . RemoveAll ( c . root ) ; err != nil {
return err
}
if err := os . MkdirAll ( c . root , 0700 ) ; err != nil {
return err
}
c . config . Backend . SetClusterProvider ( nil )
return nil
}
2016-07-15 13:58:21 -04:00
func ( c * Cluster ) getRequestContext ( ) ( context . Context , func ( ) ) { // TODO: not needed when requests don't block on qourum lost
return context . WithTimeout ( context . Background ( ) , swarmRequestTimeout )
2016-06-13 22:52:49 -04:00
}
2016-06-24 14:52:28 -04:00
// Inspect retrieves the configuration properties of a managed swarm cluster.
2016-06-13 22:52:49 -04:00
func ( c * Cluster ) Inspect ( ) ( types . Swarm , error ) {
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return types . Swarm { } , c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
swarm , err := getSwarm ( ctx , c . client )
2016-06-13 22:52:49 -04:00
if err != nil {
return types . Swarm { } , err
}
return convert . SwarmFromGRPC ( * swarm ) , nil
}
// Update updates configuration of a managed swarm cluster.
2016-07-20 14:15:08 -04:00
func ( c * Cluster ) Update ( version uint64 , spec types . Spec , flags types . UpdateFlags ) error {
2016-06-13 22:52:49 -04:00
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
swarm , err := getSwarm ( ctx , c . client )
2016-06-13 22:52:49 -04:00
if err != nil {
return err
}
2016-08-26 00:08:53 -04:00
// In update, client should provide the complete spec of the swarm, including
// Name and Labels. If a field is specified with 0 or nil, then the default value
// will be used to swarmkit.
clusterSpec , err := convert . SwarmSpecToGRPC ( spec )
2016-06-13 22:52:49 -04:00
if err != nil {
return err
}
_ , err = c . client . UpdateCluster (
2016-07-15 13:58:21 -04:00
ctx ,
2016-06-13 22:52:49 -04:00
& swarmapi . UpdateClusterRequest {
ClusterID : swarm . ID ,
2016-08-26 00:08:53 -04:00
Spec : & clusterSpec ,
2016-06-13 22:52:49 -04:00
ClusterVersion : & swarmapi . Version {
Index : version ,
} ,
2016-10-27 21:50:49 -04:00
Rotation : swarmapi . KeyRotation {
WorkerJoinToken : flags . RotateWorkerToken ,
ManagerJoinToken : flags . RotateManagerToken ,
ManagerUnlockKey : flags . RotateManagerUnlockKey ,
2016-07-20 14:15:08 -04:00
} ,
2016-06-13 22:52:49 -04:00
} ,
)
return err
}
2016-06-24 14:52:28 -04:00
// IsManager returns true if Cluster is participating as a manager.
2016-06-13 22:52:49 -04:00
func ( c * Cluster ) IsManager ( ) bool {
c . RLock ( )
defer c . RUnlock ( )
return c . isActiveManager ( )
}
2016-06-24 14:52:28 -04:00
// IsAgent returns true if Cluster is participating as a worker/agent.
2016-06-13 22:52:49 -04:00
func ( c * Cluster ) IsAgent ( ) bool {
c . RLock ( )
defer c . RUnlock ( )
2016-06-20 19:35:33 -04:00
return c . node != nil && c . ready
2016-06-13 22:52:49 -04:00
}
2016-06-30 21:07:35 -04:00
// GetLocalAddress returns the local address.
func ( c * Cluster ) GetLocalAddress ( ) string {
2016-06-13 22:52:49 -04:00
c . RLock ( )
defer c . RUnlock ( )
2016-06-30 21:07:35 -04:00
return c . actualLocalAddr
}
2016-09-22 21:43:54 -04:00
// GetListenAddress returns the listen address.
func ( c * Cluster ) GetListenAddress ( ) string {
c . RLock ( )
defer c . RUnlock ( )
2016-10-21 16:31:45 -04:00
if c . node != nil {
return c . node . config . ListenAddr
}
return ""
2016-09-22 21:43:54 -04:00
}
2016-06-30 21:07:35 -04:00
// GetAdvertiseAddress returns the remotely reachable address of this node.
func ( c * Cluster ) GetAdvertiseAddress ( ) string {
c . RLock ( )
defer c . RUnlock ( )
2016-10-21 16:31:45 -04:00
if c . node != nil && c . node . config . AdvertiseAddr != "" {
advertiseHost , _ , _ := net . SplitHostPort ( c . node . config . AdvertiseAddr )
2016-06-30 21:07:35 -04:00
return advertiseHost
2016-06-13 22:52:49 -04:00
}
2016-06-30 21:07:35 -04:00
return c . actualLocalAddr
2016-06-13 22:52:49 -04:00
}
2016-06-24 14:52:28 -04:00
// GetRemoteAddress returns a known advertise address of a remote manager if
2016-06-13 22:52:49 -04:00
// available.
// todo: change to array/connect with info
func ( c * Cluster ) GetRemoteAddress ( ) string {
c . RLock ( )
defer c . RUnlock ( )
return c . getRemoteAddress ( )
}
// getRemoteAddress returns the address of the first remote that is not this
// node itself, or "" if none is known. Caller must hold the lock.
func (c *Cluster) getRemoteAddress() string {
	if c.node == nil {
		return ""
	}
	selfID := c.node.NodeID()
	for _, remote := range c.node.Remotes() {
		if remote.NodeID != selfID {
			return remote.Addr
		}
	}
	return ""
}
// ListenClusterEvents returns a channel that receives messages on cluster
// participation changes.
// todo: make cancelable and accessible to multiple callers
func (c *Cluster) ListenClusterEvents() <-chan struct{} {
	return c.configEvent
}
// Info returns information about the current cluster state.
func ( c * Cluster ) Info ( ) types . Info {
2016-06-30 21:07:35 -04:00
info := types . Info {
NodeAddr : c . GetAdvertiseAddress ( ) ,
}
2016-06-13 22:52:49 -04:00
c . RLock ( )
defer c . RUnlock ( )
if c . node == nil {
info . LocalNodeState = types . LocalNodeStateInactive
if c . cancelDelay != nil {
info . LocalNodeState = types . LocalNodeStateError
}
2016-10-21 21:07:55 -04:00
if c . locked {
info . LocalNodeState = types . LocalNodeStateLocked
2016-11-08 21:03:47 -05:00
} else if c . err == ErrSwarmCertificatesExpired {
info . LocalNodeState = types . LocalNodeStateError
2016-10-21 21:07:55 -04:00
}
2016-06-13 22:52:49 -04:00
} else {
info . LocalNodeState = types . LocalNodeStatePending
if c . ready == true {
info . LocalNodeState = types . LocalNodeStateActive
2016-10-31 18:02:34 -04:00
} else if c . locked {
info . LocalNodeState = types . LocalNodeStateLocked
2016-06-13 22:52:49 -04:00
}
}
if c . err != nil {
info . Error = c . err . Error ( )
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
2016-06-13 22:52:49 -04:00
if c . isActiveManager ( ) {
info . ControlAvailable = true
2016-07-24 04:53:52 -04:00
swarm , err := c . Inspect ( )
if err != nil {
info . Error = err . Error ( )
}
2016-07-27 15:06:00 -04:00
// Strip JoinTokens
info . Cluster = swarm . ClusterInfo
2016-07-15 13:58:21 -04:00
if r , err := c . client . ListNodes ( ctx , & swarmapi . ListNodesRequest { } ) ; err == nil {
2016-06-13 22:52:49 -04:00
info . Nodes = len ( r . Nodes )
for _ , n := range r . Nodes {
if n . ManagerStatus != nil {
info . Managers = info . Managers + 1
}
}
}
}
if c . node != nil {
for _ , r := range c . node . Remotes ( ) {
info . RemoteManagers = append ( info . RemoteManagers , types . Peer { NodeID : r . NodeID , Addr : r . Addr } )
}
info . NodeID = c . node . NodeID ( )
}
return info
}
// isActiveManager should not be called without a read lock
func ( c * Cluster ) isActiveManager ( ) bool {
2016-06-20 19:35:33 -04:00
return c . node != nil && c . conn != nil
2016-06-13 22:52:49 -04:00
}
2016-11-08 21:03:47 -05:00
// swarmExists should not be called without a read lock
func ( c * Cluster ) swarmExists ( ) bool {
return c . node != nil || c . locked || c . err == ErrSwarmCertificatesExpired
}
2016-06-23 16:52:41 -04:00
// errNoManager returns error describing why manager commands can't be used.
// Call with read lock.
func ( c * Cluster ) errNoManager ( ) error {
if c . node == nil {
2016-10-21 21:07:55 -04:00
if c . locked {
return ErrSwarmLocked
}
2016-11-08 21:03:47 -05:00
if c . err == ErrSwarmCertificatesExpired {
return ErrSwarmCertificatesExpired
}
2016-07-22 03:20:52 -04:00
return fmt . Errorf ( "This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again." )
2016-06-23 16:52:41 -04:00
}
if c . node . Manager ( ) != nil {
2016-07-20 14:15:08 -04:00
return fmt . Errorf ( "This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster." )
2016-06-23 16:52:41 -04:00
}
2016-07-20 14:15:08 -04:00
return fmt . Errorf ( "This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager." )
2016-06-23 16:52:41 -04:00
}
2016-06-13 22:52:49 -04:00
// GetServices returns all services of a managed swarm cluster.
func ( c * Cluster ) GetServices ( options apitypes . ServiceListOptions ) ( [ ] types . Service , error ) {
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return nil , c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
2016-11-01 10:01:16 -04:00
filters , err := newListServicesFilters ( options . Filters )
2016-06-13 22:52:49 -04:00
if err != nil {
return nil , err
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
2016-06-13 22:52:49 -04:00
r , err := c . client . ListServices (
2016-07-15 13:58:21 -04:00
ctx ,
2016-06-13 22:52:49 -04:00
& swarmapi . ListServicesRequest { Filters : filters } )
if err != nil {
return nil , err
}
2016-07-12 05:41:04 -04:00
services := [ ] types . Service { }
2016-06-13 22:52:49 -04:00
for _ , service := range r . Services {
services = append ( services , convert . ServiceFromGRPC ( * service ) )
}
return services , nil
}
2016-11-08 12:32:29 -05:00
// imageWithDigestString takes an image such as name or name:tag
// and returns the image pinned to a digest, such as name@sha256:34234...
2016-11-14 19:12:24 -05:00
// Due to the difference between the docker/docker/reference, and the
// docker/distribution/reference packages, we're parsing the image twice.
// As the two packages converge, this function should be simplified.
// TODO(nishanttotla): After the packages converge, the function must
// convert distreference.Named -> distreference.Canonical, and the logic simplified.
2016-11-08 12:32:29 -05:00
func ( c * Cluster ) imageWithDigestString ( ctx context . Context , image string , authConfig * apitypes . AuthConfig ) ( string , error ) {
2016-11-14 19:12:24 -05:00
ref , err := distreference . ParseNamed ( image )
2016-11-08 12:32:29 -05:00
if err != nil {
return "" , err
}
// only query registry if not a canonical reference (i.e. with digest)
2016-11-14 19:12:24 -05:00
if _ , ok := ref . ( distreference . Canonical ) ; ! ok {
// create a docker/docker/reference Named object because GetRepository needs it
dockerRef , err := reference . ParseNamed ( image )
if err != nil {
return "" , err
}
dockerRef = reference . WithDefaultTag ( dockerRef )
namedTaggedRef , ok := dockerRef . ( reference . NamedTagged )
2016-11-08 12:32:29 -05:00
if ! ok {
return "" , fmt . Errorf ( "unable to cast image to NamedTagged reference object" )
}
repo , _ , err := c . config . Backend . GetRepository ( ctx , namedTaggedRef , authConfig )
if err != nil {
return "" , err
}
dscrptr , err := repo . Tags ( ctx ) . Get ( ctx , namedTaggedRef . Tag ( ) )
if err != nil {
return "" , err
}
2016-11-14 19:12:24 -05:00
namedDigestedRef , err := distreference . WithDigest ( distreference . EnsureTagged ( ref ) , dscrptr . Digest )
if err != nil {
return "" , err
}
return namedDigestedRef . String ( ) , nil
2016-11-08 12:32:29 -05:00
} else {
// reference already contains a digest, so just return it
return ref . String ( ) , nil
}
}
2016-06-13 22:52:49 -04:00
// CreateService creates a new service in a managed swarm cluster.
2016-06-15 14:50:49 -04:00
func ( c * Cluster ) CreateService ( s types . ServiceSpec , encodedAuth string ) ( string , error ) {
2016-06-13 22:52:49 -04:00
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return "" , c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
2016-06-13 22:52:49 -04:00
2016-07-23 11:11:10 -04:00
err := c . populateNetworkID ( ctx , c . client , & s )
2016-06-13 22:52:49 -04:00
if err != nil {
return "" , err
}
serviceSpec , err := convert . ServiceSpecToGRPC ( s )
if err != nil {
return "" , err
}
2016-06-15 14:50:49 -04:00
2016-11-08 12:32:29 -05:00
ctnr := serviceSpec . Task . GetContainer ( )
if ctnr == nil {
return "" , fmt . Errorf ( "service does not use container tasks" )
}
2016-06-15 14:50:49 -04:00
if encodedAuth != "" {
2016-06-29 20:08:00 -04:00
ctnr . PullOptions = & swarmapi . ContainerSpec_PullOptions { RegistryAuth : encodedAuth }
2016-06-15 14:50:49 -04:00
}
2016-11-08 12:32:29 -05:00
// retrieve auth config from encoded auth
authConfig := & apitypes . AuthConfig { }
if encodedAuth != "" {
if err := json . NewDecoder ( base64 . NewDecoder ( base64 . URLEncoding , strings . NewReader ( encodedAuth ) ) ) . Decode ( authConfig ) ; err != nil {
logrus . Warnf ( "invalid authconfig: %v" , err )
}
}
// pin image by digest
if os . Getenv ( "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE" ) != "1" {
digestImage , err := c . imageWithDigestString ( ctx , ctnr . Image , authConfig )
if err != nil {
logrus . Warnf ( "unable to pin image %s to digest: %s" , ctnr . Image , err . Error ( ) )
} else {
logrus . Debugf ( "pinning image %s by digest: %s" , ctnr . Image , digestImage )
ctnr . Image = digestImage
}
}
2016-06-13 22:52:49 -04:00
r , err := c . client . CreateService ( ctx , & swarmapi . CreateServiceRequest { Spec : & serviceSpec } )
if err != nil {
return "" , err
}
return r . Service . ID , nil
}
2016-06-24 14:52:28 -04:00
// GetService returns a service based on an ID or name.
2016-06-13 22:52:49 -04:00
func ( c * Cluster ) GetService ( input string ) ( types . Service , error ) {
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return types . Service { } , c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
service , err := getService ( ctx , c . client , input )
2016-06-13 22:52:49 -04:00
if err != nil {
return types . Service { } , err
}
return convert . ServiceFromGRPC ( * service ) , nil
}
// UpdateService updates existing service to match new properties.
//
// encodedAuth, when non-empty, replaces the registry auth stored on the
// service; when empty, the previous auth is carried over from the source
// selected by registryAuthFrom (current spec, previous spec, or "" which
// defaults to the current spec).
func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, encodedAuth string, registryAuthFrom string) error {
	c.RLock()
	defer c.RUnlock()
	if !c.isActiveManager() {
		return c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	// Resolve any network names in the spec to their IDs.
	err := c.populateNetworkID(ctx, c.client, &spec)
	if err != nil {
		return err
	}

	serviceSpec, err := convert.ServiceSpecToGRPC(spec)
	if err != nil {
		return err
	}

	currentService, err := getService(ctx, c.client, serviceIDOrName)
	if err != nil {
		return err
	}

	newCtnr := serviceSpec.Task.GetContainer()
	if newCtnr == nil {
		return fmt.Errorf("service does not use container tasks")
	}

	if encodedAuth != "" {
		newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
	} else {
		// this is needed because if the encodedAuth isn't being updated then we
		// shouldn't lose it, and continue to use the one that was already present
		var ctnr *swarmapi.ContainerSpec
		switch registryAuthFrom {
		case apitypes.RegistryAuthFromSpec, "":
			// Default: reuse the auth from the service's current spec.
			ctnr = currentService.Spec.Task.GetContainer()
		case apitypes.RegistryAuthFromPreviousSpec:
			// Reuse the auth from the previous (pre-update) spec, if any.
			if currentService.PreviousSpec == nil {
				return fmt.Errorf("service does not have a previous spec")
			}
			ctnr = currentService.PreviousSpec.Task.GetContainer()
		default:
			return fmt.Errorf("unsupported registryAuthFromValue")
		}
		if ctnr == nil {
			return fmt.Errorf("service does not use container tasks")
		}
		newCtnr.PullOptions = ctnr.PullOptions
		// update encodedAuth so it can be used to pin image by digest
		if ctnr.PullOptions != nil {
			encodedAuth = ctnr.PullOptions.RegistryAuth
		}
	}

	// retrieve auth config from encoded auth
	authConfig := &apitypes.AuthConfig{}
	if encodedAuth != "" {
		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
			// Best-effort: bad auth only means digest pinning below may fail.
			logrus.Warnf("invalid authconfig: %v", err)
		}
	}

	// pin image by digest
	if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" {
		digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
		if err != nil {
			logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
		} else if newCtnr.Image != digestImage {
			logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
			newCtnr.Image = digestImage
		}
	}

	_, err = c.client.UpdateService(
		ctx,
		&swarmapi.UpdateServiceRequest{
			ServiceID: currentService.ID,
			Spec:      &serviceSpec,
			ServiceVersion: &swarmapi.Version{
				Index: version,
			},
		},
	)
	return err
}
// RemoveService removes a service from a managed swarm cluster.
func ( c * Cluster ) RemoveService ( input string ) error {
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
service , err := getService ( ctx , c . client , input )
2016-06-13 22:52:49 -04:00
if err != nil {
return err
}
2016-07-15 13:58:21 -04:00
if _ , err := c . client . RemoveService ( ctx , & swarmapi . RemoveServiceRequest { ServiceID : service . ID } ) ; err != nil {
2016-06-13 22:52:49 -04:00
return err
}
return nil
}
// ServiceLogs collects service logs and writes them back to `config.OutStream`
//
// The read lock is held only while validating the manager state and opening
// the log subscription; it is released manually before entering the long
// streaming loop, so early error paths must unlock explicitly.
// `started` is closed once the output stream is ready to receive data.
func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend.ContainerLogsConfig, started chan struct{}) error {
	c.RLock()
	if !c.isActiveManager() {
		c.RUnlock()
		return c.errNoManager()
	}

	service, err := getService(ctx, c.client, input)
	if err != nil {
		c.RUnlock()
		return err
	}

	// Follow the logs of all tasks of this one service.
	stream, err := c.logs.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
		Selector: &swarmapi.LogSelector{
			ServiceIDs: []string{service.ID},
		},
		Options: &swarmapi.LogSubscriptionOptions{
			Follow: true,
		},
	})
	if err != nil {
		c.RUnlock()
		return err
	}

	wf := ioutils.NewWriteFlusher(config.OutStream)
	defer wf.Close()
	// Signal the caller that streaming is about to begin, then flush so
	// headers reach the client immediately.
	close(started)
	wf.Flush()

	// Multiplex stdout/stderr over the single output stream.
	outStream := stdcopy.NewStdWriter(wf, stdcopy.Stdout)
	errStream := stdcopy.NewStdWriter(wf, stdcopy.Stderr)

	// Release the lock before starting the stream.
	c.RUnlock()
	for {
		// Check the context before doing anything.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		subscribeMsg, err := stream.Recv()
		if err == io.EOF {
			// Subscription ended normally.
			return nil
		}
		if err != nil {
			return err
		}

		for _, msg := range subscribeMsg.Messages {
			data := []byte{}

			if config.Timestamps {
				ts, err := ptypes.Timestamp(msg.Timestamp)
				if err != nil {
					return err
				}
				data = append(data, []byte(ts.Format(logger.TimeFormat)+" ")...)
			}

			// Prefix each line with node/service/task context so interleaved
			// task output can be attributed.
			data = append(data, []byte(fmt.Sprintf("%s.node.id=%s,%s.service.id=%s,%s.task.id=%s ",
				contextPrefix, msg.Context.NodeID,
				contextPrefix, msg.Context.ServiceID,
				contextPrefix, msg.Context.TaskID,
			))...)

			data = append(data, msg.Data...)

			switch msg.Stream {
			case swarmapi.LogStreamStdout:
				outStream.Write(data)
			case swarmapi.LogStreamStderr:
				errStream.Write(data)
			}
		}
	}
}
2016-06-13 22:52:49 -04:00
// GetNodes returns a list of all nodes known to a cluster.
func ( c * Cluster ) GetNodes ( options apitypes . NodeListOptions ) ( [ ] types . Node , error ) {
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return nil , c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
2016-11-01 10:01:16 -04:00
filters , err := newListNodesFilters ( options . Filters )
2016-06-13 22:52:49 -04:00
if err != nil {
return nil , err
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
2016-06-13 22:52:49 -04:00
r , err := c . client . ListNodes (
2016-07-15 13:58:21 -04:00
ctx ,
2016-06-13 22:52:49 -04:00
& swarmapi . ListNodesRequest { Filters : filters } )
if err != nil {
return nil , err
}
nodes := [ ] types . Node { }
for _ , node := range r . Nodes {
nodes = append ( nodes , convert . NodeFromGRPC ( * node ) )
}
return nodes , nil
}
2016-06-24 14:52:28 -04:00
// GetNode returns a node based on an ID or name.
2016-06-13 22:52:49 -04:00
func ( c * Cluster ) GetNode ( input string ) ( types . Node , error ) {
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return types . Node { } , c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
node , err := getNode ( ctx , c . client , input )
2016-06-13 22:52:49 -04:00
if err != nil {
return types . Node { } , err
}
return convert . NodeFromGRPC ( * node ) , nil
}
// UpdateNode updates existing nodes properties.
2016-10-21 06:16:47 -04:00
func ( c * Cluster ) UpdateNode ( input string , version uint64 , spec types . NodeSpec ) error {
2016-06-13 22:52:49 -04:00
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
nodeSpec , err := convert . NodeSpecToGRPC ( spec )
if err != nil {
return err
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
2016-10-21 06:16:47 -04:00
currentNode , err := getNode ( ctx , c . client , input )
if err != nil {
return err
}
2016-06-13 22:52:49 -04:00
_ , err = c . client . UpdateNode (
2016-07-15 13:58:21 -04:00
ctx ,
2016-06-13 22:52:49 -04:00
& swarmapi . UpdateNodeRequest {
2016-10-21 06:16:47 -04:00
NodeID : currentNode . ID ,
2016-06-13 22:52:49 -04:00
Spec : & nodeSpec ,
NodeVersion : & swarmapi . Version {
Index : version ,
} ,
} ,
)
return err
}
// RemoveNode removes a node from a cluster
2016-07-28 00:17:00 -04:00
func ( c * Cluster ) RemoveNode ( input string , force bool ) error {
2016-06-13 22:52:49 -04:00
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
2016-06-13 22:52:49 -04:00
node , err := getNode ( ctx , c . client , input )
if err != nil {
return err
}
2016-07-28 00:17:00 -04:00
if _ , err := c . client . RemoveNode ( ctx , & swarmapi . RemoveNodeRequest { NodeID : node . ID , Force : force } ) ; err != nil {
2016-06-13 22:52:49 -04:00
return err
}
return nil
}
// GetTasks returns a list of tasks matching the filter options.
func ( c * Cluster ) GetTasks ( options apitypes . TaskListOptions ) ( [ ] types . Task , error ) {
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return nil , c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
2016-06-30 09:09:03 -04:00
byName := func ( filter filters . Args ) error {
if filter . Include ( "service" ) {
serviceFilters := filter . Get ( "service" )
for _ , serviceFilter := range serviceFilters {
service , err := c . GetService ( serviceFilter )
if err != nil {
return err
}
filter . Del ( "service" , serviceFilter )
filter . Add ( "service" , service . ID )
}
}
if filter . Include ( "node" ) {
nodeFilters := filter . Get ( "node" )
for _ , nodeFilter := range nodeFilters {
node , err := c . GetNode ( nodeFilter )
if err != nil {
return err
}
filter . Del ( "node" , nodeFilter )
filter . Add ( "node" , node . ID )
}
}
return nil
}
2016-11-01 10:01:16 -04:00
filters , err := newListTasksFilters ( options . Filters , byName )
2016-06-13 22:52:49 -04:00
if err != nil {
return nil , err
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
2016-06-13 22:52:49 -04:00
r , err := c . client . ListTasks (
2016-07-15 13:58:21 -04:00
ctx ,
2016-06-13 22:52:49 -04:00
& swarmapi . ListTasksRequest { Filters : filters } )
if err != nil {
return nil , err
}
tasks := [ ] types . Task { }
for _ , task := range r . Tasks {
2016-09-13 20:44:06 -04:00
if task . Spec . GetContainer ( ) != nil {
tasks = append ( tasks , convert . TaskFromGRPC ( * task ) )
}
2016-06-13 22:52:49 -04:00
}
return tasks , nil
}
// GetTask returns a task by an ID.
func ( c * Cluster ) GetTask ( input string ) ( types . Task , error ) {
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return types . Task { } , c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
task , err := getTask ( ctx , c . client , input )
2016-06-13 22:52:49 -04:00
if err != nil {
return types . Task { } , err
}
return convert . TaskFromGRPC ( * task ) , nil
}
2016-06-24 14:52:28 -04:00
// GetNetwork returns a cluster network by an ID.
2016-06-13 22:52:49 -04:00
func ( c * Cluster ) GetNetwork ( input string ) ( apitypes . NetworkResource , error ) {
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return apitypes . NetworkResource { } , c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
network , err := getNetwork ( ctx , c . client , input )
2016-06-13 22:52:49 -04:00
if err != nil {
return apitypes . NetworkResource { } , err
}
return convert . BasicNetworkFromGRPC ( * network ) , nil
}
// GetNetworks returns all current cluster managed networks.
func ( c * Cluster ) GetNetworks ( ) ( [ ] apitypes . NetworkResource , error ) {
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return nil , c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
r , err := c . client . ListNetworks ( ctx , & swarmapi . ListNetworksRequest { } )
2016-06-13 22:52:49 -04:00
if err != nil {
return nil , err
}
var networks [ ] apitypes . NetworkResource
for _ , network := range r . Networks {
networks = append ( networks , convert . BasicNetworkFromGRPC ( * network ) )
}
return networks , nil
}
// attacherKey builds the map key used to look up a network attacher for a
// given container/network pair: "<containerID>:<target>".
func attacherKey(netTarget, ctrID string) string {
	return ctrID + ":" + netTarget
}
// UpdateAttachment signals the attachment config to the attachment
// waiter who is trying to start or attach the container to the
// network.
func (c *Cluster) UpdateAttachment(target, containerID string, config *network.NetworkingConfig) error {
	c.RLock()
	waiter, ok := c.attachers[attacherKey(target, containerID)]
	c.RUnlock()

	if !ok || waiter == nil {
		return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target)
	}

	// Deliver the config, then close the channel to mark it consumed.
	waiter.attachWaitCh <- config
	close(waiter.attachWaitCh)
	return nil
}
// WaitForDetachment waits for the container to stop or detach from
// the network.
//
// The attacher may have been registered under either the network name or the
// network ID, so both keys are tried. The read lock is released before any
// channel waits so other cluster operations are not blocked.
func (c *Cluster) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error {
	c.RLock()
	attacher, ok := c.attachers[attacherKey(networkName, containerID)]
	if !ok {
		attacher, ok = c.attachers[attacherKey(networkID, containerID)]
	}
	if c.node == nil || c.node.Agent() == nil {
		c.RUnlock()
		return fmt.Errorf("invalid cluster node while waiting for detachment")
	}

	agent := c.node.Agent()
	c.RUnlock()

	if ok && attacher != nil &&
		attacher.detachWaitCh != nil &&
		attacher.attachCompleteCh != nil {
		// Attachment may be in progress still so wait for
		// attachment to complete.
		select {
		case <-attacher.attachCompleteCh:
		case <-ctx.Done():
			return ctx.Err()
		}

		// Only wait for detach if the attacher belongs to the same task;
		// otherwise fall through and issue the detach directly.
		if attacher.taskID == taskID {
			select {
			case <-attacher.detachWaitCh:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	}

	return agent.ResourceAllocator().DetachNetwork(ctx, taskID)
}
// AttachNetwork generates an attachment request towards the manager.
//
// It registers an attacher for the container/network pair, asks the agent to
// attach, then blocks until UpdateAttachment delivers the resulting network
// config (or the request context expires). A second call for the same pair
// returns the already-stored config.
func (c *Cluster) AttachNetwork(target string, containerID string, addresses []string) (*network.NetworkingConfig, error) {
	aKey := attacherKey(target, containerID)
	c.Lock()
	if c.node == nil || c.node.Agent() == nil {
		c.Unlock()
		return nil, fmt.Errorf("invalid cluster node while attaching to network")
	}
	// Already attached (or attaching): reuse the existing config.
	if attacher, ok := c.attachers[aKey]; ok {
		c.Unlock()
		return attacher.config, nil
	}

	agent := c.node.Agent()
	// Channels used to hand off state between this call, UpdateAttachment,
	// and WaitForDetachment.
	attachWaitCh := make(chan *network.NetworkingConfig)
	detachWaitCh := make(chan struct{})
	attachCompleteCh := make(chan struct{})
	c.attachers[aKey] = &attacher{
		attachWaitCh:     attachWaitCh,
		attachCompleteCh: attachCompleteCh,
		detachWaitCh:     detachWaitCh,
	}
	c.Unlock()

	ctx, cancel := c.getRequestContext()
	defer cancel()

	taskID, err := agent.ResourceAllocator().AttachNetwork(ctx, containerID, target, addresses)
	if err != nil {
		// Roll back the registration so a later attempt can retry.
		c.Lock()
		delete(c.attachers, aKey)
		c.Unlock()
		return nil, fmt.Errorf("Could not attach to network %s: %v", target, err)
	}

	// Record the task and signal WaitForDetachment waiters that the
	// attachment phase completed.
	c.Lock()
	c.attachers[aKey].taskID = taskID
	close(attachCompleteCh)
	c.Unlock()

	logrus.Debugf("Successfully attached to network %s with tid %s", target, taskID)

	// Block until the manager pushes the network config via UpdateAttachment.
	var config *network.NetworkingConfig
	select {
	case config = <-attachWaitCh:
	case <-ctx.Done():
		return nil, fmt.Errorf("attaching to network failed, make sure your network options are correct and check manager logs: %v", ctx.Err())
	}

	c.Lock()
	c.attachers[aKey].config = config
	c.Unlock()
	return config, nil
}
// DetachNetwork unblocks the waiters waiting on WaitForDetachment so
// that a request to detach can be generated towards the manager.
func (c *Cluster) DetachNetwork(target string, containerID string) error {
	key := attacherKey(target, containerID)

	c.Lock()
	waiter, registered := c.attachers[key]
	delete(c.attachers, key)
	c.Unlock()

	if !registered {
		return fmt.Errorf("could not find network attachment for container %s to network %s", containerID, target)
	}

	close(waiter.detachWaitCh)
	return nil
}
2016-06-13 22:52:49 -04:00
// CreateNetwork creates a new cluster managed network.
func ( c * Cluster ) CreateNetwork ( s apitypes . NetworkCreateRequest ) ( string , error ) {
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return "" , c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
if runconfig . IsPreDefinedNetwork ( s . Name ) {
err := fmt . Errorf ( "%s is a pre-defined network and cannot be created" , s . Name )
2016-10-21 21:07:55 -04:00
return "" , apierrors . NewRequestForbiddenError ( err )
2016-06-13 22:52:49 -04:00
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
2016-06-13 22:52:49 -04:00
networkSpec := convert . BasicNetworkCreateToGRPC ( s )
2016-07-15 13:58:21 -04:00
r , err := c . client . CreateNetwork ( ctx , & swarmapi . CreateNetworkRequest { Spec : & networkSpec } )
2016-06-13 22:52:49 -04:00
if err != nil {
return "" , err
}
return r . Network . ID , nil
}
// RemoveNetwork removes a cluster network.
func ( c * Cluster ) RemoveNetwork ( input string ) error {
c . RLock ( )
defer c . RUnlock ( )
if ! c . isActiveManager ( ) {
2016-06-23 16:52:41 -04:00
return c . errNoManager ( )
2016-06-13 22:52:49 -04:00
}
2016-07-15 13:58:21 -04:00
ctx , cancel := c . getRequestContext ( )
defer cancel ( )
network , err := getNetwork ( ctx , c . client , input )
2016-06-13 22:52:49 -04:00
if err != nil {
return err
}
2016-07-15 13:58:21 -04:00
if _ , err := c . client . RemoveNetwork ( ctx , & swarmapi . RemoveNetworkRequest { NetworkID : network . ID } ) ; err != nil {
2016-06-13 22:52:49 -04:00
return err
}
return nil
}
2016-07-23 11:11:10 -04:00
func ( c * Cluster ) populateNetworkID ( ctx context . Context , client swarmapi . ControlClient , s * types . ServiceSpec ) error {
2016-08-23 19:50:15 -04:00
// Always prefer NetworkAttachmentConfigs from TaskTemplate
// but fallback to service spec for backward compatibility
networks := s . TaskTemplate . Networks
if len ( networks ) == 0 {
networks = s . Networks
}
for i , n := range networks {
2016-07-23 11:11:10 -04:00
apiNetwork , err := getNetwork ( ctx , client , n . Target )
2016-06-13 22:52:49 -04:00
if err != nil {
2016-07-23 11:11:10 -04:00
if ln , _ := c . config . Backend . FindNetwork ( n . Target ) ; ln != nil && ! ln . Info ( ) . Dynamic ( ) {
err = fmt . Errorf ( "network %s is not eligible for docker services" , ln . Name ( ) )
2016-10-21 21:07:55 -04:00
return apierrors . NewRequestForbiddenError ( err )
2016-07-23 11:11:10 -04:00
}
2016-06-13 22:52:49 -04:00
return err
}
2016-08-23 19:50:15 -04:00
networks [ i ] . Target = apiNetwork . ID
2016-06-13 22:52:49 -04:00
}
return nil
}
// Cleanup stops active swarm node. This is run before daemon shutdown.
func ( c * Cluster ) Cleanup ( ) {
c . Lock ( )
node := c . node
if node == nil {
c . Unlock ( )
return
}
2016-06-20 19:35:33 -04:00
defer c . Unlock ( )
2016-06-13 22:52:49 -04:00
if c . isActiveManager ( ) {
active , reachable , unreachable , err := c . managerStats ( )
if err == nil {
2016-08-19 16:49:58 -04:00
singlenode := active && isLastManager ( reachable , unreachable )
if active && ! singlenode && removingManagerCausesLossOfQuorum ( reachable , unreachable ) {
2016-06-13 22:52:49 -04:00
logrus . Errorf ( "Leaving cluster with %v managers left out of %v. Raft quorum will be lost." , reachable - 1 , reachable + unreachable )
}
}
}
2016-06-20 19:35:33 -04:00
c . stopNode ( )
2016-06-13 22:52:49 -04:00
}
func ( c * Cluster ) managerStats ( ) ( current bool , reachable int , unreachable int , err error ) {
2016-07-15 13:58:21 -04:00
ctx , cancel := context . WithTimeout ( context . Background ( ) , 5 * time . Second )
defer cancel ( )
2016-06-13 22:52:49 -04:00
nodes , err := c . client . ListNodes ( ctx , & swarmapi . ListNodesRequest { } )
if err != nil {
return false , 0 , 0 , err
}
for _ , n := range nodes . Nodes {
if n . ManagerStatus != nil {
2016-06-14 20:23:01 -04:00
if n . ManagerStatus . Reachability == swarmapi . RaftMemberStatus_REACHABLE {
2016-06-13 22:52:49 -04:00
reachable ++
if n . ID == c . node . NodeID ( ) {
current = true
}
}
2016-06-14 20:23:01 -04:00
if n . ManagerStatus . Reachability == swarmapi . RaftMemberStatus_UNREACHABLE {
2016-06-13 22:52:49 -04:00
unreachable ++
}
}
}
return
}
2016-06-21 17:27:04 -04:00
func validateAndSanitizeInitRequest ( req * types . InitRequest ) error {
var err error
req . ListenAddr , err = validateAddr ( req . ListenAddr )
if err != nil {
return fmt . Errorf ( "invalid ListenAddr %q: %v" , req . ListenAddr , err )
}
return nil
}
// validateAndSanitizeJoinRequest normalizes the listen address and every
// remote address of a join request in place. At least one remote address is
// required.
func validateAndSanitizeJoinRequest(req *types.JoinRequest) error {
	listen, err := validateAddr(req.ListenAddr)
	if err != nil {
		return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err)
	}
	req.ListenAddr = listen

	if len(req.RemoteAddrs) == 0 {
		return fmt.Errorf("at least 1 RemoteAddr is required to join")
	}
	for i, remote := range req.RemoteAddrs {
		sanitized, err := validateAddr(remote)
		if err != nil {
			return fmt.Errorf("invalid remoteAddr %q: %v", remote, err)
		}
		req.RemoteAddrs[i] = sanitized
	}
	return nil
}
// validateAddr normalizes addr into host:port form (using defaultAddr to fill
// in missing pieces) and strips any "tcp://" scheme prefix. An empty address
// is rejected.
func validateAddr(addr string) (string, error) {
	if addr == "" {
		return addr, fmt.Errorf("invalid empty address")
	}
	newaddr, err := opts.ParseTCPAddr(addr, defaultAddr)
	if err != nil {
		// NOTE(review): a parse failure falls back to the raw input with a
		// nil error instead of rejecting it — presumably to let non-TCP
		// addresses pass through unmodified; confirm this is intentional.
		return addr, nil
	}
	return strings.TrimPrefix(newaddr, "tcp://"), nil
}
2016-06-20 19:35:33 -04:00
func initClusterSpec ( node * node , spec types . Spec ) error {
2016-06-13 22:52:49 -04:00
ctx , _ := context . WithTimeout ( context . Background ( ) , 5 * time . Second )
for conn := range node . ListenControlSocket ( ctx ) {
if ctx . Err ( ) != nil {
return ctx . Err ( )
}
if conn != nil {
client := swarmapi . NewControlClient ( conn )
var cluster * swarmapi . Cluster
for i := 0 ; ; i ++ {
lcr , err := client . ListClusters ( ctx , & swarmapi . ListClustersRequest { } )
if err != nil {
return fmt . Errorf ( "error on listing clusters: %v" , err )
}
if len ( lcr . Clusters ) == 0 {
if i < 10 {
time . Sleep ( 200 * time . Millisecond )
continue
}
return fmt . Errorf ( "empty list of clusters was returned" )
}
cluster = lcr . Clusters [ 0 ]
break
}
2016-08-26 00:08:53 -04:00
// In init, we take the initial default values from swarmkit, and merge
// any non nil or 0 value from spec to GRPC spec. This will leave the
// default value alone.
// Note that this is different from Update(), as in Update() we expect
// user to specify the complete spec of the cluster (as they already know
// the existing one and knows which field to update)
clusterSpec , err := convert . MergeSwarmSpecToGRPC ( spec , cluster . Spec )
2016-06-21 17:27:04 -04:00
if err != nil {
2016-06-13 22:52:49 -04:00
return fmt . Errorf ( "error updating cluster settings: %v" , err )
}
2016-06-21 17:27:04 -04:00
_ , err = client . UpdateCluster ( ctx , & swarmapi . UpdateClusterRequest {
2016-06-13 22:52:49 -04:00
ClusterID : cluster . ID ,
ClusterVersion : & cluster . Meta . Version ,
2016-08-26 00:08:53 -04:00
Spec : & clusterSpec ,
2016-06-13 22:52:49 -04:00
} )
if err != nil {
return fmt . Errorf ( "error updating cluster settings: %v" , err )
}
return nil
}
}
return ctx . Err ( )
}
2016-10-21 21:07:55 -04:00
func detectLockedError ( err error ) error {
2016-10-27 21:50:49 -04:00
if err == swarmnode . ErrInvalidUnlockKey {
2016-10-21 21:07:55 -04:00
return errors . WithStack ( ErrSwarmLocked )
}
return err
}