2015-07-30 17:01:53 -04:00
// Package daemon exposes the functions that occur on the host server
// that the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
2014-04-17 17:43:01 -04:00
package daemon
2013-01-18 19:13:39 -05:00
import (
2016-05-08 19:11:34 -04:00
"encoding/json"
2013-01-18 19:13:39 -05:00
"fmt"
2014-04-28 17:36:04 -04:00
"io"
"io/ioutil"
2015-12-17 12:35:24 -05:00
"net"
2014-04-28 17:36:04 -04:00
"os"
2015-09-03 20:51:04 -04:00
"path"
2015-01-16 14:48:25 -05:00
"path/filepath"
2014-07-30 02:51:43 -04:00
"runtime"
Remove static errors from errors package.
Moving all strings to the errors package wasn't a good idea after all.
Our custom implementation of Go errors predates everything that's nice
and good about working with errors in Go. Take as an example what we
have to do to get an error message:
```go
func GetErrorMessage(err error) string {
switch err.(type) {
case errcode.Error:
e, _ := err.(errcode.Error)
return e.Message
case errcode.ErrorCode:
ec, _ := err.(errcode.ErrorCode)
return ec.Message()
default:
return err.Error()
}
}
```
This goes against every good practice for Go development. The language already provides a simple, intuitive and standard way to get error messages, that is calling the `Error()` method from an error. Reinventing the error interface is a mistake.
Our custom implementation also makes very hard to reason about errors, another nice thing about Go. I found several (>10) error declarations that we don't use anywhere. This is a clear sign about how little we know about the errors we return. I also found several error usages where the number of arguments was different than the parameters declared in the error, another clear example of how difficult is to reason about errors.
Moreover, our custom implementation didn't really make easier for people to return custom HTTP status code depending on the errors. Again, it's hard to reason about when to set custom codes and how. Take an example what we have to do to extract the message and status code from an error before returning a response from the API:
```go
switch err.(type) {
case errcode.ErrorCode:
daError, _ := err.(errcode.ErrorCode)
statusCode = daError.Descriptor().HTTPStatusCode
errMsg = daError.Message()
case errcode.Error:
// For reference, if you're looking for a particular error
// then you can do something like :
// import ( derr "github.com/docker/docker/errors" )
// if daError.ErrorCode() == derr.ErrorCodeNoSuchContainer { ... }
daError, _ := err.(errcode.Error)
statusCode = daError.ErrorCode().Descriptor().HTTPStatusCode
errMsg = daError.Message
default:
// This part of will be removed once we've
// converted everything over to use the errcode package
// FIXME: this is brittle and should not be necessary.
// If we need to differentiate between different possible error types,
// we should create appropriate error types with clearly defined meaning
errStr := strings.ToLower(err.Error())
for keyword, status := range map[string]int{
"not found": http.StatusNotFound,
"no such": http.StatusNotFound,
"bad parameter": http.StatusBadRequest,
"conflict": http.StatusConflict,
"impossible": http.StatusNotAcceptable,
"wrong login/password": http.StatusUnauthorized,
"hasn't been activated": http.StatusForbidden,
} {
if strings.Contains(errStr, keyword) {
statusCode = status
break
}
}
}
```
You can notice two things in that code:
1. We have to explain how errors work, because our implementation goes against how easy to use Go errors are.
2. At no moment we arrived to remove that `switch` statement that was the original reason to use our custom implementation.
This change removes all our status errors from the errors package and puts them back in their specific contexts.
IT puts the messages back with their contexts. That way, we know right away when errors used and how to generate their messages.
It uses custom interfaces to reason about errors. Errors that need to response with a custom status code MUST implementent this simple interface:
```go
type errorWithStatus interface {
HTTPErrorStatusCode() int
}
```
This interface is very straightforward to implement. It also preserves Go errors real behavior, getting the message is as simple as using the `Error()` method.
I included helper functions to generate errors that use custom status code in `errors/errors.go`.
By doing this, we remove the hard dependency we have eeverywhere to our custom errors package. Yes, you can use it as a helper to generate error, but it's still very easy to generate errors without it.
Please, read this fantastic blog post about errors in Go: http://dave.cheney.net/2014/12/24/inspecting-errors
Signed-off-by: David Calavera <david.calavera@gmail.com>
2016-02-25 10:53:35 -05:00
"strings"
2014-04-28 17:36:04 -04:00
"sync"
2015-12-17 12:35:24 -05:00
"syscall"
2014-04-28 17:36:04 -04:00
"time"
2015-03-26 18:22:04 -04:00
"github.com/Sirupsen/logrus"
2016-03-18 14:50:19 -04:00
containerd "github.com/docker/containerd/api/grpc/types"
2014-11-17 14:23:41 -05:00
"github.com/docker/docker/api"
2016-09-06 14:18:12 -04:00
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
2015-11-12 14:55:17 -05:00
"github.com/docker/docker/container"
2015-04-03 18:17:49 -04:00
"github.com/docker/docker/daemon/events"
2015-11-20 17:35:16 -05:00
"github.com/docker/docker/daemon/exec"
2016-07-20 19:11:28 -04:00
"github.com/docker/docker/dockerversion"
2016-11-09 20:49:09 -05:00
"github.com/docker/docker/plugin"
2016-06-13 22:52:49 -04:00
"github.com/docker/libnetwork/cluster"
2015-12-23 13:43:34 -05:00
// register graph drivers
_ "github.com/docker/docker/daemon/graphdriver/register"
2015-11-18 17:20:54 -05:00
dmetadata "github.com/docker/docker/distribution/metadata"
2015-11-13 19:59:01 -05:00
"github.com/docker/docker/distribution/xfer"
2015-07-20 13:57:15 -04:00
"github.com/docker/docker/image"
2015-11-18 17:20:54 -05:00
"github.com/docker/docker/layer"
2016-03-18 14:50:19 -04:00
"github.com/docker/docker/libcontainerd"
2015-11-18 17:20:54 -05:00
"github.com/docker/docker/migrate/v1"
2015-03-29 17:17:23 -04:00
"github.com/docker/docker/pkg/fileutils"
2015-11-21 13:45:34 -05:00
"github.com/docker/docker/pkg/graphdb"
2015-10-08 11:51:41 -04:00
"github.com/docker/docker/pkg/idtools"
2016-10-07 16:53:14 -04:00
"github.com/docker/docker/pkg/plugingetter"
2015-11-13 19:59:01 -05:00
"github.com/docker/docker/pkg/progress"
2015-09-03 20:51:04 -04:00
"github.com/docker/docker/pkg/registrar"
2015-08-24 21:42:58 -04:00
"github.com/docker/docker/pkg/signal"
2015-11-13 19:59:01 -05:00
"github.com/docker/docker/pkg/streamformatter"
2014-07-24 18:19:50 -04:00
"github.com/docker/docker/pkg/sysinfo"
2015-05-15 19:34:26 -04:00
"github.com/docker/docker/pkg/system"
2014-07-24 18:19:50 -04:00
"github.com/docker/docker/pkg/truncindex"
2016-09-07 20:01:10 -04:00
pluginstore "github.com/docker/docker/plugin/store"
2015-12-04 16:55:15 -05:00
"github.com/docker/docker/reference"
2015-03-31 19:21:37 -04:00
"github.com/docker/docker/registry"
2014-07-24 18:19:50 -04:00
"github.com/docker/docker/runconfig"
2015-09-16 17:18:24 -04:00
volumedrivers "github.com/docker/docker/volume/drivers"
"github.com/docker/docker/volume/local"
2015-09-18 19:58:05 -04:00
"github.com/docker/docker/volume/store"
2015-05-15 19:34:26 -04:00
"github.com/docker/libnetwork"
2016-03-09 23:33:21 -05:00
nwconfig "github.com/docker/libnetwork/config"
2015-11-18 17:20:54 -05:00
"github.com/docker/libtrust"
2015-11-13 19:59:01 -05:00
)
2013-12-12 16:34:26 -05:00
var (
	// DefaultRuntimeBinary is the default runtime to be used by
	// containerd if none is specified
	DefaultRuntimeBinary = "docker-runc"

	// DefaultInitBinary is the name of the default init binary
	DefaultInitBinary = "docker-init"

	// errSystemNotSupported is returned by NewDaemon when the host
	// platform cannot run a Docker daemon at all (platformSupported
	// is false).
	errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.")
)
2013-09-06 20:33:05 -04:00
2015-07-30 17:01:53 -04:00
// Daemon holds information about the Docker daemon.
type Daemon struct {
	ID string
	// repository is the root directory holding the per-container state
	// directories scanned by restore().
	repository                string
	containers                container.Store
	execCommands              *exec.Store
	referenceStore            reference.Store
	downloadManager           *xfer.LayerDownloadManager
	uploadManager             *xfer.LayerUploadManager
	distributionMetadataStore dmetadata.Store
	trustKey                  libtrust.PrivateKey
	idIndex                   *truncindex.TruncIndex
	configStore               *Config
	statsCollector            *statsCollector
	defaultLogConfig          containertypes.LogConfig
	RegistryService           registry.Service
	EventsService             *events.Events
	netController             libnetwork.NetworkController
	volumes                   *store.VolumeStore
	// discoveryWatcher gates container start-up on discovery readiness
	// (see waitForNetworks); nil when discovery is not configured.
	discoveryWatcher discoveryReloader
	// root is the daemon's state root; e.g. the legacy link DB lives at
	// <root>/linkgraph.db.
	root           string
	seccompEnabled bool
	shutdown       bool
	uidMaps        []idtools.IDMap
	gidMaps        []idtools.IDMap
	layerStore     layer.Store
	imageStore     image.Store
	PluginStore    *pluginstore.Store
	// nameIndex reserves container and link names; linkIndex tracks
	// parent/child relationships between linked containers.
	nameIndex        *registrar.Registrar
	linkIndex        *linkIndex
	containerd       libcontainerd.Client
	containerdRemote libcontainerd.Remote
	defaultIsolation containertypes.Isolation // Default isolation mode on Windows
	clusterProvider  cluster.Provider
	cluster          Cluster
	seccompProfile   []byte
	seccompProfilePath string
}
2016-10-06 10:09:54 -04:00
// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
	// Return the condition directly instead of the redundant
	// if-true/return-false dance; nil-check guards early callers
	// before the config store is set.
	return daemon.configStore != nil && daemon.configStore.Experimental
}
2015-09-29 13:51:40 -04:00
func ( daemon * Daemon ) restore ( ) error {
2014-06-05 20:31:58 -04:00
var (
2015-12-16 15:32:16 -05:00
currentDriver = daemon . GraphDriverName ( )
2015-09-03 20:51:04 -04:00
containers = make ( map [ string ] * container . Container )
2014-06-05 20:31:58 -04:00
)
2014-05-30 14:03:56 -04:00
2016-09-28 02:41:19 -04:00
logrus . Info ( "Loading containers: start." )
2014-04-17 17:43:01 -04:00
dir , err := ioutil . ReadDir ( daemon . repository )
2013-01-18 19:13:39 -05:00
if err != nil {
return err
}
2013-10-24 19:49:28 -04:00
2013-12-18 13:43:42 -05:00
for _ , v := range dir {
2013-03-21 03:25:00 -04:00
id := v . Name ( )
2014-04-17 17:43:01 -04:00
container , err := daemon . load ( id )
2013-01-18 19:13:39 -05:00
if err != nil {
2015-03-26 18:22:04 -04:00
logrus . Errorf ( "Failed to load container %v: %v" , id , err )
2013-01-18 19:13:39 -05:00
continue
}
2013-11-15 01:52:08 -05:00
// Ignore the container if it does not support the current driver being used by the graph
2014-08-06 13:40:43 -04:00
if ( container . Driver == "" && currentDriver == "aufs" ) || container . Driver == currentDriver {
2016-01-26 20:20:30 -05:00
rwlayer , err := daemon . layerStore . GetRWLayer ( container . ID )
if err != nil {
logrus . Errorf ( "Failed to load container mount %v: %v" , id , err )
continue
}
container . RWLayer = rwlayer
2015-03-26 18:22:04 -04:00
logrus . Debugf ( "Loaded container %v" , container . ID )
2014-08-06 13:40:43 -04:00
2015-09-03 20:51:04 -04:00
containers [ container . ID ] = container
2013-11-15 01:52:08 -05:00
} else {
2015-03-26 18:22:04 -04:00
logrus . Debugf ( "Cannot load container %s because it was created with another graph driver." , container . ID )
2013-11-15 01:52:08 -05:00
}
2013-10-04 22:25:15 -04:00
}
2015-11-21 13:45:34 -05:00
var migrateLegacyLinks bool
2016-03-01 11:30:27 -05:00
removeContainers := make ( map [ string ] * container . Container )
2015-11-24 15:25:12 -05:00
restartContainers := make ( map [ * container . Container ] chan struct { } )
2016-06-14 12:13:53 -04:00
activeSandboxes := make ( map [ string ] interface { } )
2016-09-29 11:35:10 -04:00
for id , c := range containers {
2015-09-03 20:51:04 -04:00
if err := daemon . registerName ( c ) ; err != nil {
logrus . Errorf ( "Failed to register container %s: %s" , c . ID , err )
2016-09-29 11:35:10 -04:00
delete ( containers , id )
2015-09-03 20:51:04 -04:00
continue
2015-11-24 15:25:12 -05:00
}
2015-09-03 20:51:04 -04:00
if err := daemon . Register ( c ) ; err != nil {
logrus . Errorf ( "Failed to register container %s: %s" , c . ID , err )
2016-09-29 11:35:10 -04:00
delete ( containers , id )
2015-11-24 15:25:12 -05:00
continue
}
2016-05-07 12:20:24 -04:00
2016-09-06 09:49:10 -04:00
// verify that all volumes valid and have been migrated from the pre-1.7 layout
if err := daemon . verifyVolumesInfo ( c ) ; err != nil {
// don't skip the container due to error
logrus . Errorf ( "Failed to verify volumes for container '%s': %v" , c . ID , err )
}
2016-05-07 12:20:24 -04:00
// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
// We should rewrite it to use the daemon defaults.
// Fixes https://github.com/docker/docker/issues/22536
if c . HostConfig . LogConfig . Type == "" {
if err := daemon . mergeAndVerifyLogConfig ( & c . HostConfig . LogConfig ) ; err != nil {
logrus . Errorf ( "Failed to verify log config for container %s: %q" , c . ID , err )
continue
}
}
2016-03-18 14:50:19 -04:00
}
var wg sync . WaitGroup
var mapLock sync . Mutex
for _ , c := range containers {
wg . Add ( 1 )
go func ( c * container . Container ) {
defer wg . Done ( )
2016-08-04 15:34:52 -04:00
if err := backportMountSpec ( c ) ; err != nil {
2016-11-01 00:05:01 -04:00
logrus . Error ( "Failed to migrate old mounts to use new spec format" )
2016-08-04 15:34:52 -04:00
}
2016-03-18 14:50:19 -04:00
if c . IsRunning ( ) || c . IsPaused ( ) {
2016-10-05 16:29:56 -04:00
c . RestartManager ( ) . Cancel ( ) // manually start containers because some need to wait for swarm networking
2016-10-17 17:39:52 -04:00
if err := daemon . containerd . Restore ( c . ID , c . InitializeStdio ) ; err != nil {
2016-07-11 11:55:39 -04:00
logrus . Errorf ( "Failed to restore %s with containerd: %s" , c . ID , err )
2016-03-18 14:50:19 -04:00
return
}
2016-10-05 16:29:56 -04:00
c . ResetRestartManager ( false )
2016-06-16 07:54:36 -04:00
if ! c . HostConfig . NetworkMode . IsContainer ( ) && c . IsRunning ( ) {
2016-06-14 12:13:53 -04:00
options , err := daemon . buildSandboxOptions ( c )
if err != nil {
logrus . Warnf ( "Failed build sandbox option to restore container %s: %v" , c . ID , err )
}
mapLock . Lock ( )
activeSandboxes [ c . NetworkSettings . SandboxID ] = options
mapLock . Unlock ( )
}
2016-03-18 14:50:19 -04:00
}
// fixme: only if not running
// get list of containers we need to restart
2016-03-01 11:30:27 -05:00
if ! c . IsRunning ( ) && ! c . IsPaused ( ) {
2016-09-09 12:55:57 -04:00
// Do not autostart containers which
// has endpoints in a swarm scope
// network yet since the cluster is
// not initialized yet. We will start
// it after the cluster is
// initialized.
if daemon . configStore . AutoRestart && c . ShouldRestart ( ) && ! c . NetworkSettings . HasSwarmEndpoint {
2016-03-01 11:30:27 -05:00
mapLock . Lock ( )
restartContainers [ c ] = make ( chan struct { } )
mapLock . Unlock ( )
} else if c . HostConfig != nil && c . HostConfig . AutoRemove {
2016-08-29 19:58:01 -04:00
mapLock . Lock ( )
2016-03-01 11:30:27 -05:00
removeContainers [ c . ID ] = c
2016-08-29 19:58:01 -04:00
mapLock . Unlock ( )
2016-03-01 11:30:27 -05:00
}
2016-03-18 14:50:19 -04:00
}
2015-09-03 20:51:04 -04:00
2016-04-29 14:38:13 -04:00
if c . RemovalInProgress {
// We probably crashed in the middle of a removal, reset
// the flag.
//
// We DO NOT remove the container here as we do not
// know if the user had requested for either the
// associated volumes, network links or both to also
// be removed. So we put the container in the "dead"
// state and leave further processing up to them.
logrus . Debugf ( "Resetting RemovalInProgress flag from %v" , c . ID )
c . ResetRemovalInProgress ( )
c . SetDead ( )
c . ToDisk ( )
}
2016-03-18 14:50:19 -04:00
// if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
if c . HostConfig != nil && c . HostConfig . Links == nil {
migrateLegacyLinks = true
}
} ( c )
2015-11-21 13:45:34 -05:00
}
2016-03-18 14:50:19 -04:00
wg . Wait ( )
2016-06-14 12:13:53 -04:00
daemon . netController , err = daemon . initNetworkController ( daemon . configStore , activeSandboxes )
if err != nil {
return fmt . Errorf ( "Error initializing network controller: %v" , err )
}
2015-11-21 13:45:34 -05:00
// migrate any legacy links from sqlite
linkdbFile := filepath . Join ( daemon . root , "linkgraph.db" )
var legacyLinkDB * graphdb . Database
if migrateLegacyLinks {
legacyLinkDB , err = graphdb . NewSqliteConn ( linkdbFile )
if err != nil {
return fmt . Errorf ( "error connecting to legacy link graph DB %s, container links may be lost: %v" , linkdbFile , err )
}
defer legacyLinkDB . Close ( )
2015-09-03 20:51:04 -04:00
}
// Now that all the containers are registered, register the links
for _ , c := range containers {
2015-11-21 13:45:34 -05:00
if migrateLegacyLinks {
if err := daemon . migrateLegacySqliteLinks ( legacyLinkDB , c ) ; err != nil {
return err
}
}
2015-09-03 20:51:04 -04:00
if err := daemon . registerLinks ( c , c . HostConfig ) ; err != nil {
logrus . Errorf ( "failed to register link for container %s: %v" , c . ID , err )
2015-11-24 15:25:12 -05:00
}
}
2014-08-06 13:40:43 -04:00
2015-11-24 15:25:12 -05:00
group := sync . WaitGroup { }
for c , notifier := range restartContainers {
group . Add ( 1 )
2015-09-03 20:51:04 -04:00
go func ( c * container . Container , chNotify chan struct { } ) {
2015-11-24 15:25:12 -05:00
defer group . Done ( )
2015-09-03 20:51:04 -04:00
logrus . Debugf ( "Starting container %s" , c . ID )
2014-08-06 13:40:43 -04:00
2015-11-24 15:25:12 -05:00
// ignore errors here as this is a best effort to wait for children to be
// running before we try to start the container
2015-09-03 20:51:04 -04:00
children := daemon . children ( c )
2015-11-24 15:25:12 -05:00
timeout := time . After ( 5 * time . Second )
for _ , child := range children {
if notifier , exists := restartContainers [ child ] ; exists {
select {
case <- notifier :
case <- timeout :
}
2014-08-06 13:40:43 -04:00
}
}
2016-05-04 10:13:23 -04:00
// Make sure networks are available before starting
daemon . waitForNetworks ( c )
2016-09-19 12:01:16 -04:00
if err := daemon . containerStart ( c , "" , "" , true ) ; err != nil {
2015-09-03 20:51:04 -04:00
logrus . Errorf ( "Failed to start container %s: %s" , c . ID , err )
2015-11-24 15:25:12 -05:00
}
close ( chNotify )
} ( c , notifier )
2015-09-03 20:51:04 -04:00
2014-06-05 20:31:58 -04:00
}
2015-05-19 16:05:25 -04:00
group . Wait ( )
2014-06-05 20:31:58 -04:00
2016-03-01 11:30:27 -05:00
removeGroup := sync . WaitGroup { }
for id := range removeContainers {
removeGroup . Add ( 1 )
go func ( cid string ) {
if err := daemon . ContainerRm ( cid , & types . ContainerRmConfig { ForceRemove : true , RemoveVolume : true } ) ; err != nil {
logrus . Errorf ( "Failed to remove container %s: %s" , cid , err )
}
2016-06-09 11:32:20 -04:00
removeGroup . Done ( )
2016-03-01 11:30:27 -05:00
} ( id )
}
removeGroup . Wait ( )
2016-01-20 12:06:03 -05:00
// any containers that were started above would already have had this done,
// however we need to now prepare the mountpoints for the rest of the containers as well.
// This shouldn't cause any issue running on the containers that already had this run.
// This must be run after any containers with a restart policy so that containerized plugins
// can have a chance to be running before we try to initialize them.
for _ , c := range containers {
2016-01-27 02:43:40 -05:00
// if the container has restart policy, do not
// prepare the mountpoints since it has been done on restarting.
// This is to speed up the daemon start when a restart container
// has a volume and the volume dirver is not available.
if _ , ok := restartContainers [ c ] ; ok {
continue
2016-03-01 11:30:27 -05:00
} else if _ , ok := removeContainers [ c . ID ] ; ok {
// container is automatically removed, skip it.
continue
2016-01-27 02:43:40 -05:00
}
2016-03-01 11:30:27 -05:00
2016-01-20 12:06:03 -05:00
group . Add ( 1 )
go func ( c * container . Container ) {
defer group . Done ( )
if err := daemon . prepareMountPoints ( c ) ; err != nil {
logrus . Error ( err )
}
} ( c )
}
group . Wait ( )
2016-09-28 02:41:19 -04:00
logrus . Info ( "Loading containers: done." )
2013-10-04 22:25:15 -04:00
2013-01-18 19:13:39 -05:00
return nil
}
2016-09-09 12:55:57 -04:00
// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func ( daemon * Daemon ) RestartSwarmContainers ( ) {
group := sync . WaitGroup { }
for _ , c := range daemon . List ( ) {
if ! c . IsRunning ( ) && ! c . IsPaused ( ) {
// Autostart all the containers which has a
// swarm endpoint now that the cluster is
// initialized.
if daemon . configStore . AutoRestart && c . ShouldRestart ( ) && c . NetworkSettings . HasSwarmEndpoint {
group . Add ( 1 )
go func ( c * container . Container ) {
defer group . Done ( )
2016-09-19 12:01:16 -04:00
if err := daemon . containerStart ( c , "" , "" , true ) ; err != nil {
2016-09-09 12:55:57 -04:00
logrus . Error ( err )
}
} ( c )
}
}
}
group . Wait ( )
}
2016-05-04 10:13:23 -04:00
// waitForNetworks is used during daemon initialization when starting up containers
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that require discovery.
// It never returns an error: after at most 60 seconds of waiting it
// gives up and lets the container start anyway.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
	// No discovery configured: nothing to wait for.
	if daemon.discoveryWatcher == nil {
		return
	}
	// Make sure if the container has a network that requires discovery that the discovery service is available before starting
	for netName := range c.NetworkSettings.Networks {
		// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
		// Most likely this is because the K/V store used for discovery is in a container and needs to be started
		if _, err := daemon.netController.NetworkByName(netName); err != nil {
			// Any other lookup error is ignored here and left for
			// container start to surface.
			if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
				continue
			}
			// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
			// FIXME: why is this slow???
			logrus.Debugf("Container %s waiting for network to be ready", c.Name)
			select {
			case <-daemon.discoveryWatcher.ReadyCh():
			case <-time.After(60 * time.Second):
			}
			// One wait covers all networks: discovery readiness is global.
			return
		}
	}
}
2015-09-03 20:51:04 -04:00
func ( daemon * Daemon ) children ( c * container . Container ) map [ string ] * container . Container {
return daemon . linkIndex . children ( c )
2013-10-04 22:25:15 -04:00
}
2015-07-30 17:01:53 -04:00
// parents returns the names of the parent containers of the container
// with the given name.
2015-09-03 20:51:04 -04:00
func ( daemon * Daemon ) parents ( c * container . Container ) map [ string ] * container . Container {
return daemon . linkIndex . parents ( c )
2014-07-14 19:19:37 -04:00
}
2015-11-12 14:55:17 -05:00
func ( daemon * Daemon ) registerLink ( parent , child * container . Container , alias string ) error {
2015-09-03 20:51:04 -04:00
fullName := path . Join ( parent . Name , alias )
if err := daemon . nameIndex . Reserve ( fullName , child . ID ) ; err != nil {
2016-01-20 16:24:16 -05:00
if err == registrar . ErrNameReserved {
logrus . Warnf ( "error registering link for %s, to %s, as alias %s, ignoring: %v" , parent . ID , child . ID , alias , err )
return nil
}
2013-10-04 22:25:15 -04:00
return err
}
2015-09-03 20:51:04 -04:00
daemon . linkIndex . link ( parent , child , fullName )
2013-10-28 19:58:59 -04:00
return nil
2013-10-04 22:25:15 -04:00
}
2016-07-03 13:58:11 -04:00
// SetClusterProvider sets a component for querying the current cluster state.
2016-06-13 22:52:49 -04:00
func ( daemon * Daemon ) SetClusterProvider ( clusterProvider cluster . Provider ) {
daemon . clusterProvider = clusterProvider
daemon . netController . SetClusterProvider ( clusterProvider )
}
2016-06-14 12:13:53 -04:00
// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with the swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
	// A daemon without a config store has nothing to conflict with swarm.
	if cfg := daemon.configStore; cfg != nil {
		return cfg.isSwarmCompatible()
	}
	return nil
}
2015-07-30 17:01:53 -04:00
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
2016-05-21 10:00:28 -04:00
func NewDaemon ( config * Config , registryService registry . Service , containerdRemote libcontainerd . Remote ) ( daemon * Daemon , err error ) {
2015-06-15 19:33:02 -04:00
setDefaultMtu ( config )
2016-06-01 20:29:06 -04:00
// Ensure that we have a correct root key limit for launching containers.
if err := ModifyRootKeyLimit ( ) ; err != nil {
2016-11-08 16:06:24 -05:00
logrus . Warnf ( "unable to modify root key limit, number of containers could be limited by this quota: %v" , err )
2016-06-01 20:29:06 -04:00
}
2016-01-22 21:15:09 -05:00
// Ensure we have compatible and valid configuration options
if err := verifyDaemonSettings ( config ) ; err != nil {
2015-05-15 19:34:26 -04:00
return nil , err
2014-09-16 23:00:15 -04:00
}
2015-05-15 19:34:26 -04:00
// Do we have a disabled network?
2015-06-30 13:34:15 -04:00
config . DisableBridge = isBridgeNetworkDisabled ( config )
2014-08-09 21:18:32 -04:00
2015-07-11 15:32:08 -04:00
// Verify the platform is supported as a daemon
2015-08-07 12:33:29 -04:00
if ! platformSupported {
2015-07-30 17:01:53 -04:00
return nil , errSystemNotSupported
2015-07-11 15:32:08 -04:00
}
// Validate platform-specific requirements
2015-05-15 19:34:26 -04:00
if err := checkSystem ( ) ; err != nil {
2014-09-16 13:42:59 -04:00
return nil , err
2014-07-30 02:51:43 -04:00
}
2015-10-08 11:51:41 -04:00
uidMaps , gidMaps , err := setupRemappedRoot ( config )
if err != nil {
return nil , err
}
rootUID , rootGID , err := idtools . GetRootUIDGID ( uidMaps , gidMaps )
if err != nil {
return nil , err
}
2016-07-11 18:26:23 -04:00
if err := setupDaemonProcess ( config ) ; err != nil {
2014-05-09 21:05:54 -04:00
return nil , err
}
2015-06-23 08:53:18 -04:00
// set up the tmpDir to use a canonical path
2015-10-08 11:51:41 -04:00
tmp , err := tempDir ( config . Root , rootUID , rootGID )
2015-06-23 08:53:18 -04:00
if err != nil {
return nil , fmt . Errorf ( "Unable to get the TempDir under %s: %s" , config . Root , err )
}
realTmp , err := fileutils . ReadSymlinkedDirectory ( tmp )
if err != nil {
return nil , fmt . Errorf ( "Unable to get the full path to the TempDir (%s): %s" , tmp , err )
}
os . Setenv ( "TMPDIR" , realTmp )
2016-03-18 14:50:19 -04:00
d := & Daemon { configStore : config }
2015-12-16 15:32:16 -05:00
// Ensure the daemon is properly shutdown if there is a failure during
// initialization
2015-04-27 17:11:29 -04:00
defer func ( ) {
if err != nil {
2015-09-29 13:51:40 -04:00
if err := d . Shutdown ( ) ; err != nil {
2015-04-27 17:11:29 -04:00
logrus . Error ( err )
}
2015-03-11 10:33:06 -04:00
}
2015-04-27 17:11:29 -04:00
} ( )
2013-11-07 15:34:01 -05:00
2016-09-02 09:20:54 -04:00
if err := d . setupSeccompProfile ( ) ; err != nil {
return nil , err
}
2016-03-18 14:50:19 -04:00
// Set the default isolation mode (only applicable on Windows)
if err := d . setDefaultIsolation ( ) ; err != nil {
return nil , fmt . Errorf ( "error setting default isolation mode: %v" , err )
}
2015-04-09 00:23:30 -04:00
logrus . Debugf ( "Using default logging driver %s" , config . LogConfig . Type )
2015-12-02 05:26:30 -05:00
if err := configureMaxThreads ( config ) ; err != nil {
logrus . Warnf ( "Failed to configure golang's threads limit: %v" , err )
}
2016-03-18 14:50:19 -04:00
installDefaultAppArmorProfile ( )
2015-05-15 19:34:26 -04:00
daemonRepo := filepath . Join ( config . Root , "containers" )
2015-10-08 11:51:41 -04:00
if err := idtools . MkdirAllAs ( daemonRepo , 0700 , rootUID , rootGID ) ; err != nil && ! os . IsExist ( err ) {
2013-03-13 21:48:50 -04:00
return nil , err
}
2016-06-07 15:15:50 -04:00
if runtime . GOOS == "windows" {
2016-11-04 15:42:21 -04:00
if err := system . MkdirAll ( filepath . Join ( config . Root , "credentialspecs" ) , 0 ) ; err != nil && ! os . IsExist ( err ) {
2016-06-07 15:15:50 -04:00
return nil , err
}
}
2015-12-16 15:32:16 -05:00
driverName := os . Getenv ( "DOCKER_DRIVER" )
if driverName == "" {
driverName = config . GraphDriver
}
2016-09-07 20:01:10 -04:00
2016-10-07 17:53:17 -04:00
d . PluginStore = pluginstore . NewStore ( config . Root )
2016-09-07 20:01:10 -04:00
2015-12-16 15:32:16 -05:00
d . layerStore , err = layer . NewStoreFromOptions ( layer . StoreOptions {
StorePath : config . Root ,
2015-12-30 01:18:12 -05:00
MetadataStorePathTemplate : filepath . Join ( config . Root , "image" , "%s" , "layerdb" ) ,
2015-12-16 15:32:16 -05:00
GraphDriver : driverName ,
GraphDriverOptions : config . GraphOptions ,
UIDMaps : uidMaps ,
GIDMaps : gidMaps ,
2016-10-07 17:53:17 -04:00
PluginGetter : d . PluginStore ,
2015-12-16 15:32:16 -05:00
} )
2015-11-18 17:20:54 -05:00
if err != nil {
return nil , err
}
2015-12-16 15:32:16 -05:00
graphDriver := d . layerStore . DriverName ( )
imageRoot := filepath . Join ( config . Root , "image" , graphDriver )
// Configure and validate the kernels security support
if err := configureKernelSecuritySupport ( config , graphDriver ) ; err != nil {
2015-11-18 17:20:54 -05:00
return nil , err
}
2016-05-06 00:45:55 -04:00
logrus . Debugf ( "Max Concurrent Downloads: %d" , * config . MaxConcurrentDownloads )
d . downloadManager = xfer . NewLayerDownloadManager ( d . layerStore , * config . MaxConcurrentDownloads )
logrus . Debugf ( "Max Concurrent Uploads: %d" , * config . MaxConcurrentUploads )
d . uploadManager = xfer . NewLayerUploadManager ( * config . MaxConcurrentUploads )
2015-11-18 17:20:54 -05:00
ifs , err := image . NewFSStoreBackend ( filepath . Join ( imageRoot , "imagedb" ) )
if err != nil {
return nil , err
}
d . imageStore , err = image . NewImageStore ( ifs , d . layerStore )
2013-02-26 20:45:46 -05:00
if err != nil {
return nil , err
}
2013-11-15 05:30:28 -05:00
2015-05-15 19:34:26 -04:00
// Configure the volumes driver
2016-05-16 11:50:55 -04:00
volStore , err := d . configureVolumes ( rootUID , rootGID )
2015-06-12 09:25:32 -04:00
if err != nil {
2013-04-05 21:00:10 -04:00
return nil , err
}
2014-08-28 10:18:08 -04:00
2015-01-07 17:59:12 -05:00
trustKey , err := api . LoadOrCreateTrustKey ( config . TrustKeyPath )
if err != nil {
return nil , err
}
2015-05-15 19:34:26 -04:00
trustDir := filepath . Join ( config . Root , "trust" )
Simplify and fix os.MkdirAll() usage
TL;DR: check for IsExist(err) after a failed MkdirAll() is both
redundant and wrong -- so two reasons to remove it.
Quoting MkdirAll documentation:
> MkdirAll creates a directory named path, along with any necessary
> parents, and returns nil, or else returns an error. If path
> is already a directory, MkdirAll does nothing and returns nil.
This means two things:
1. If a directory to be created already exists, no error is returned.
2. If the error returned is IsExist (EEXIST), it means there exists
a non-directory with the same name that MkdirAll needs to use for
a directory. Example: we want to MkdirAll("a/b"), but file "a"
(or "a/b") already exists, so MkdirAll fails.
The above is a theory, based on quoted documentation and my UNIX
knowledge.
3. In practice, though, current MkdirAll implementation [1] returns
ENOTDIR in most of cases described in #2, with the exception when
there is a race between MkdirAll and someone else creating the
last component of MkdirAll argument as a file. In this very case
MkdirAll() will indeed return EEXIST.
Because of #1, IsExist check after MkdirAll is not needed.
Because of #2 and #3, ignoring an IsExist error is just plain wrong,
as the directory we require is not created. It's cleaner to report
the error now.
Note this error is all over the tree, I guess due to copy-paste,
or trying to follow the same usage pattern as for Mkdir(),
or some not quite correct examples on the Internet.
[v2: a separate aufs commit is merged into this one]
[1] https://github.com/golang/go/blob/f9ed2f75/src/os/path.go
Signed-off-by: Kir Kolyshkin <kir@openvz.org>
2015-07-29 19:49:05 -04:00
if err := system . MkdirAll ( trustDir , 0700 ) ; err != nil {
2014-10-01 21:26:06 -04:00
return nil , err
}
2015-11-18 17:20:54 -05:00
distributionMetadataStore , err := dmetadata . NewFSMetadataStore ( filepath . Join ( imageRoot , "distribution" ) )
if err != nil {
return nil , err
}
2015-04-20 15:48:33 -04:00
eventsService := events . New ( )
2015-11-18 17:20:54 -05:00
2015-12-04 16:55:15 -05:00
referenceStore , err := reference . NewReferenceStore ( filepath . Join ( imageRoot , "repositories.json" ) )
2015-04-20 15:48:33 -04:00
if err != nil {
2015-11-18 17:20:54 -05:00
return nil , fmt . Errorf ( "Couldn't create Tag store repositories: %s" , err )
2015-04-20 15:48:33 -04:00
}
2015-11-29 22:55:22 -05:00
migrationStart := time . Now ( )
2015-12-16 15:32:16 -05:00
if err := v1 . Migrate ( config . Root , graphDriver , d . layerStore , d . imageStore , referenceStore , distributionMetadataStore ) ; err != nil {
2016-02-09 12:44:33 -05:00
logrus . Errorf ( "Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible." , err )
2015-07-24 20:49:43 -04:00
}
2015-11-29 22:55:22 -05:00
logrus . Infof ( "Graph migration to content-addressability took %.2f seconds" , time . Since ( migrationStart ) . Seconds ( ) )
2015-07-24 20:49:43 -04:00
2015-09-21 08:04:36 -04:00
// Discovery is only enabled when the daemon is launched with an address to advertise. When
// initialized, the daemon is registered and we can store the discovery backend as its read-only
2015-12-10 18:35:10 -05:00
if err := d . initDiscovery ( config ) ; err != nil {
return nil , err
2015-09-21 08:04:36 -04:00
}
2014-03-05 04:40:55 -05:00
sysInfo := sysinfo . New ( false )
2015-06-19 18:29:47 -04:00
// Check if Devices cgroup is mounted, it is hard requirement for container security,
2016-05-26 07:08:53 -04:00
// on Linux.
if runtime . GOOS == "linux" && ! sysInfo . CgroupDevicesEnabled {
2015-06-16 22:36:20 -04:00
return nil , fmt . Errorf ( "Devices cgroup isn't mounted" )
}
2015-04-27 17:11:29 -04:00
d . ID = trustKey . PublicKey ( ) . KeyID ( )
d . repository = daemonRepo
2016-01-15 18:55:46 -05:00
d . containers = container . NewMemoryStore ( )
2015-11-20 17:35:16 -05:00
d . execCommands = exec . NewStore ( )
2015-12-04 16:55:15 -05:00
d . referenceStore = referenceStore
2015-11-18 17:20:54 -05:00
d . distributionMetadataStore = distributionMetadataStore
d . trustKey = trustKey
2015-04-27 17:11:29 -04:00
d . idIndex = truncindex . NewTruncIndex ( [ ] string { } )
2015-11-03 14:06:16 -05:00
d . statsCollector = d . newStatsCollector ( 1 * time . Second )
2015-12-10 18:35:10 -05:00
d . defaultLogConfig = containertypes . LogConfig {
Type : config . LogConfig . Type ,
Config : config . LogConfig . Config ,
}
2015-04-27 17:11:29 -04:00
d . RegistryService = registryService
d . EventsService = eventsService
2015-06-12 09:25:32 -04:00
d . volumes = volStore
2015-05-19 16:05:25 -04:00
d . root = config . Root
2015-10-08 11:51:41 -04:00
d . uidMaps = uidMaps
d . gidMaps = gidMaps
2016-01-11 14:44:34 -05:00
d . seccompEnabled = sysInfo . Seccomp
2015-04-27 17:11:29 -04:00
2015-09-03 20:51:04 -04:00
d . nameIndex = registrar . NewRegistrar ( )
d . linkIndex = newLinkIndex ( )
2016-06-02 14:10:55 -04:00
d . containerdRemote = containerdRemote
2015-09-03 20:51:04 -04:00
2016-03-18 14:50:19 -04:00
go d . execCommandGC ( )
d . containerd , err = containerdRemote . Client ( d )
if err != nil {
2015-03-06 15:44:31 -05:00
return nil , err
}
2015-04-27 17:11:29 -04:00
2016-10-03 22:29:21 -04:00
// Plugin system initialization should happen before restore. Do not change order.
2016-10-06 10:09:54 -04:00
if err := d . pluginInit ( config , containerdRemote ) ; err != nil {
2015-03-06 15:44:31 -05:00
return nil , err
}
2016-09-06 17:30:55 -04:00
if err := d . restore ( ) ; err != nil {
2016-07-18 11:02:12 -04:00
return nil , err
}
2016-07-20 19:11:28 -04:00
// FIXME: this method never returns an error
info , _ := d . SystemInfo ( )
engineVersion . WithValues (
dockerversion . Version ,
dockerversion . GitCommit ,
info . Architecture ,
info . Driver ,
info . KernelVersion ,
info . OperatingSystem ,
) . Set ( 1 )
engineCpus . Set ( float64 ( info . NCPU ) )
engineMemory . Set ( float64 ( info . MemTotal ) )
2016-10-12 18:29:47 -04:00
// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
// on Windows to dump Go routine stacks
d . setupDumpStackTrap ( config . Root )
2015-05-06 18:39:29 -04:00
return d , nil
}
2015-11-12 14:55:17 -05:00
func ( daemon * Daemon ) shutdownContainer ( c * container . Container ) error {
2016-06-06 23:29:05 -04:00
stopTimeout := c . StopTimeout ( )
2015-10-29 18:11:35 -04:00
// TODO(windows): Handle docker restart with paused containers
2015-11-12 14:55:17 -05:00
if c . IsPaused ( ) {
2015-10-29 18:11:35 -04:00
// To terminate a process in freezer cgroup, we should send
// SIGTERM to this process then unfreeze it, and the process will
// force to terminate immediately.
2016-07-03 08:47:39 -04:00
logrus . Debugf ( "Found container %s is paused, sending SIGTERM before unpausing it" , c . ID )
2015-10-29 18:11:35 -04:00
sig , ok := signal . SignalMap [ "TERM" ]
if ! ok {
2016-07-03 08:47:39 -04:00
return fmt . Errorf ( "System does not support SIGTERM" )
2015-10-29 18:11:35 -04:00
}
2015-11-02 18:39:39 -05:00
if err := daemon . kill ( c , int ( sig ) ) ; err != nil {
2015-10-29 18:11:35 -04:00
return fmt . Errorf ( "sending SIGTERM to container %s with error: %v" , c . ID , err )
}
2015-11-02 18:39:39 -05:00
if err := daemon . containerUnpause ( c ) ; err != nil {
2015-10-29 18:11:35 -04:00
return fmt . Errorf ( "Failed to unpause container %s with error: %v" , c . ID , err )
}
2016-06-06 23:29:05 -04:00
if _ , err := c . WaitStop ( time . Duration ( stopTimeout ) * time . Second ) ; err != nil {
logrus . Debugf ( "container %s failed to exit in %d second of SIGTERM, sending SIGKILL to force" , c . ID , stopTimeout )
2015-10-29 18:11:35 -04:00
sig , ok := signal . SignalMap [ "KILL" ]
if ! ok {
return fmt . Errorf ( "System does not support SIGKILL" )
}
2015-11-02 18:39:39 -05:00
if err := daemon . kill ( c , int ( sig ) ) ; err != nil {
2015-10-29 18:11:35 -04:00
logrus . Errorf ( "Failed to SIGKILL container %s" , c . ID )
}
c . WaitStop ( - 1 * time . Second )
return err
}
}
2016-06-06 23:29:05 -04:00
// If container failed to exit in stopTimeout seconds of SIGTERM, then using the force
if err := daemon . containerStop ( c , stopTimeout ) ; err != nil {
2016-07-03 08:47:39 -04:00
return fmt . Errorf ( "Failed to stop container %s with error: %v" , c . ID , err )
2015-10-29 18:11:35 -04:00
}
c . WaitStop ( - 1 * time . Second )
return nil
}
2016-05-26 17:07:30 -04:00
// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers,
// and is limited by daemon's ShutdownTimeout.
2016-06-06 23:29:05 -04:00
func ( daemon * Daemon ) ShutdownTimeout ( ) int {
2016-05-26 17:07:30 -04:00
// By default we use daemon's ShutdownTimeout.
shutdownTimeout := daemon . configStore . ShutdownTimeout
2016-06-06 23:29:05 -04:00
graceTimeout := 5
if daemon . containers != nil {
for _ , c := range daemon . containers . List ( ) {
if shutdownTimeout >= 0 {
stopTimeout := c . StopTimeout ( )
if stopTimeout < 0 {
shutdownTimeout = - 1
} else {
if stopTimeout + graceTimeout > shutdownTimeout {
shutdownTimeout = stopTimeout + graceTimeout
}
}
}
}
}
return shutdownTimeout
}
2015-07-30 17:01:53 -04:00
// Shutdown stops the daemon.
2015-09-29 13:51:40 -04:00
func ( daemon * Daemon ) Shutdown ( ) error {
2015-08-05 17:09:08 -04:00
daemon . shutdown = true
2016-06-02 14:10:55 -04:00
// Keep mounts and networking running on daemon shutdown if
// we are to keep containers running and restore them.
2016-07-22 11:53:26 -04:00
2016-07-27 11:30:15 -04:00
if daemon . configStore . LiveRestoreEnabled && daemon . containers != nil {
2016-07-07 22:19:48 -04:00
// check if there are any running containers, if none we should do some cleanup
if ls , err := daemon . Containers ( & types . ContainerListOptions { } ) ; len ( ls ) != 0 || err != nil {
return nil
}
2016-06-02 14:10:55 -04:00
}
2016-07-07 22:19:48 -04:00
2015-04-27 17:11:29 -04:00
if daemon . containers != nil {
2016-05-26 17:07:30 -04:00
logrus . Debugf ( "start clean shutdown of all containers with a %d seconds timeout..." , daemon . configStore . ShutdownTimeout )
2016-01-15 18:55:46 -05:00
daemon . containers . ApplyAll ( func ( c * container . Container ) {
if ! c . IsRunning ( ) {
return
2015-04-27 17:11:29 -04:00
}
2016-01-15 18:55:46 -05:00
logrus . Debugf ( "stopping %s" , c . ID )
if err := daemon . shutdownContainer ( c ) ; err != nil {
logrus . Errorf ( "Stop container error: %v" , err )
return
}
2016-03-18 14:50:19 -04:00
if mountid , err := daemon . layerStore . GetMountID ( c . ID ) ; err == nil {
daemon . cleanupMountsByID ( mountid )
}
2016-01-15 18:55:46 -05:00
logrus . Debugf ( "container stopped %s" , c . ID )
} )
2015-10-29 18:11:35 -04:00
}
2015-06-05 18:02:56 -04:00
2016-11-12 01:57:37 -05:00
// Shutdown plugins after containers. Don't change the order.
2016-10-06 10:09:54 -04:00
daemon . pluginShutdown ( )
2016-10-03 18:42:46 -04:00
2015-10-29 18:11:35 -04:00
// trigger libnetwork Stop only if it's initialized
if daemon . netController != nil {
daemon . netController . Stop ( )
2015-04-27 17:11:29 -04:00
}
2014-03-25 19:21:07 -04:00
2015-12-16 15:32:16 -05:00
if daemon . layerStore != nil {
if err := daemon . layerStore . Cleanup ( ) ; err != nil {
logrus . Errorf ( "Error during layer Store.Cleanup(): %v" , err )
2015-06-10 19:07:53 -04:00
}
}
2015-08-03 18:05:34 -04:00
if err := daemon . cleanupMounts ( ) ; err != nil {
return err
}
2014-03-25 19:21:07 -04:00
return nil
}
2015-11-12 14:55:17 -05:00
// Mount sets container.BaseFS
2015-07-30 17:01:53 -04:00
// (is it not set coming in? why is it unset?)
2015-11-12 14:55:17 -05:00
func ( daemon * Daemon ) Mount ( container * container . Container ) error {
2015-12-16 17:13:50 -05:00
dir , err := container . RWLayer . Mount ( container . GetMountLabel ( ) )
2015-11-18 17:20:54 -05:00
if err != nil {
return err
}
logrus . Debugf ( "container mounted via layerStore: %v" , dir )
2015-05-15 19:34:26 -04:00
2015-11-12 14:55:17 -05:00
if container . BaseFS != dir {
2015-05-15 19:34:26 -04:00
// The mount path reported by the graph driver should always be trusted on Windows, since the
// volume path for a given mounted layer may change over time. This should only be an error
// on non-Windows operating systems.
2015-11-12 14:55:17 -05:00
if container . BaseFS != "" && runtime . GOOS != "windows" {
2015-11-18 17:20:54 -05:00
daemon . Unmount ( container )
2015-05-15 19:34:26 -04:00
return fmt . Errorf ( "Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')" ,
2015-12-16 15:32:16 -05:00
daemon . GraphDriverName ( ) , container . ID , container . BaseFS , dir )
2015-05-15 19:34:26 -04:00
}
2013-10-31 21:07:54 -04:00
}
2015-11-12 14:55:17 -05:00
container . BaseFS = dir // TODO: combine these fields
2013-11-07 15:34:01 -05:00
return nil
2013-10-31 21:07:54 -04:00
}
2015-11-02 20:06:09 -05:00
// Unmount unsets the container base filesystem
2016-03-18 14:50:19 -04:00
func ( daemon * Daemon ) Unmount ( container * container . Container ) error {
2015-12-16 17:13:50 -05:00
if err := container . RWLayer . Unmount ( ) ; err != nil {
2015-11-18 17:20:54 -05:00
logrus . Errorf ( "Error unmounting container %s: %s" , container . ID , err )
2016-03-18 14:50:19 -04:00
return err
2015-11-18 17:20:54 -05:00
}
2016-10-19 12:22:02 -04:00
2016-03-18 14:50:19 -04:00
return nil
2014-01-10 17:26:29 -05:00
}
2016-06-30 21:07:35 -04:00
// V4Subnets returns the IPv4 subnets of networks that are managed by Docker.
func (daemon *Daemon) V4Subnets() []net.IPNet {
	var subnets []net.IPNet
	for _, nw := range daemon.netController.Networks() {
		// IpamInfo returns (v4, v6); only the IPv4 side is wanted here.
		v4Infos, _ := nw.Info().IpamInfo()
		for _, info := range v4Infos {
			if pool := info.IPAMData.Pool; pool != nil {
				subnets = append(subnets, *pool)
			}
		}
	}
	return subnets
}
// V6Subnets returns the IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) V6Subnets() []net.IPNet {
	var subnets []net.IPNet
	for _, nw := range daemon.netController.Networks() {
		// IpamInfo returns (v4, v6); only the IPv6 side is wanted here.
		_, v6Infos := nw.Info().IpamInfo()
		for _, info := range v6Infos {
			if pool := info.IPAMData.Pool; pool != nil {
				subnets = append(subnets, *pool)
			}
		}
	}
	return subnets
}
2015-11-13 19:59:01 -05:00
func writeDistributionProgress ( cancelFunc func ( ) , outStream io . Writer , progressChan <- chan progress . Progress ) {
progressOutput := streamformatter . NewJSONStreamFormatter ( ) . NewProgressOutput ( outStream , false )
operationCancelled := false
for prog := range progressChan {
if err := progressOutput . WriteProgress ( prog ) ; err != nil && ! operationCancelled {
2015-12-17 12:35:24 -05:00
// don't log broken pipe errors as this is the normal case when a client aborts
if isBrokenPipe ( err ) {
logrus . Info ( "Pull session cancelled" )
} else {
logrus . Errorf ( "error writing progress to client: %v" , err )
}
2015-11-13 19:59:01 -05:00
cancelFunc ( )
operationCancelled = true
// Don't return, because we need to continue draining
// progressChan until it's closed to avoid a deadlock.
}
}
}
2015-12-17 12:35:24 -05:00
func isBrokenPipe ( e error ) bool {
if netErr , ok := e . ( * net . OpError ) ; ok {
e = netErr . Err
if sysErr , ok := netErr . Err . ( * os . SyscallError ) ; ok {
e = sysErr . Err
}
}
return e == syscall . EPIPE
}
2015-12-16 15:32:16 -05:00
// GraphDriverName returns the name of the graph driver used by the layer.Store
func ( daemon * Daemon ) GraphDriverName ( ) string {
return daemon . layerStore . DriverName ( )
2014-03-07 21:42:29 -05:00
}
2015-10-08 11:51:41 -04:00
// GetUIDGIDMaps returns the current daemon's user namespace settings
// for the full uid and gid maps which will be applied to containers
// started in this instance.
func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
	return daemon.uidMaps, daemon.gidMaps
}
// GetRemappedUIDGID returns the current daemon's uid and gid values
// if user namespaces are in use for this daemon instance. If not
// this function will return "real" root values of 0, 0.
func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
	// The error is intentionally discarded, matching the documented
	// fallback to 0, 0 for the non-remapped case.
	rootUID, rootGID, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
	return rootUID, rootGID
}
2015-03-29 14:51:17 -04:00
// tempDir returns the default directory to use for temporary files.
2015-10-08 11:51:41 -04:00
func tempDir ( rootDir string , rootUID , rootGID int ) ( string , error ) {
2015-03-29 14:51:17 -04:00
var tmpDir string
if tmpDir = os . Getenv ( "DOCKER_TMPDIR" ) ; tmpDir == "" {
tmpDir = filepath . Join ( rootDir , "tmp" )
}
2015-10-08 11:51:41 -04:00
return tmpDir , idtools . MkdirAllAs ( tmpDir , 0700 , rootUID , rootGID )
2015-04-16 02:31:52 -04:00
}
2015-04-22 22:23:02 -04:00
2015-11-18 17:20:54 -05:00
// setupInitLayer delegates to the package-level setupInitLayer using the
// daemon's remapped root uid/gid.
func (daemon *Daemon) setupInitLayer(initPath string) error {
	uid, gid := daemon.GetRemappedUIDGID()
	return setupInitLayer(initPath, uid, gid)
}
2015-06-15 19:33:02 -04:00
// setDefaultMtu fills in the default network MTU, but only when the
// config still carries the zero value (i.e. the user set nothing).
func setDefaultMtu(config *Config) {
	if config.Mtu == 0 {
		config.Mtu = defaultNetworkMtu
	}
}
2016-05-16 11:50:55 -04:00
func ( daemon * Daemon ) configureVolumes ( rootUID , rootGID int ) ( * store . VolumeStore , error ) {
volumesDriver , err := local . New ( daemon . configStore . Root , rootUID , rootGID )
2015-09-16 17:18:24 -04:00
if err != nil {
return nil , err
}
2015-09-18 19:58:05 -04:00
2016-10-07 17:53:17 -04:00
volumedrivers . RegisterPluginGetter ( daemon . PluginStore )
2016-09-07 20:01:10 -04:00
2016-04-11 11:17:52 -04:00
if ! volumedrivers . Register ( volumesDriver , volumesDriver . Name ( ) ) {
return nil , fmt . Errorf ( "local volume driver could not be registered" )
}
2016-05-16 11:50:55 -04:00
return store . New ( daemon . configStore . Root )
2015-09-16 17:18:24 -04:00
}
2015-10-08 19:16:36 -04:00
2015-11-03 14:25:22 -05:00
// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
	return daemon.shutdown
}
2015-12-10 18:35:10 -05:00
// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(config *Config) error {
	advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
	switch err {
	case nil:
		// settings parsed successfully; continue below
	case errDiscoveryDisabled:
		// discovery is simply not configured — not an error
		return nil
	default:
		return err
	}

	config.ClusterAdvertise = advertise
	watcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
	if err != nil {
		return fmt.Errorf("discovery initialization failed (%v)", err)
	}
	daemon.discoveryWatcher = watcher
	return nil
}
// Reload reads configuration changes and modifies the
// daemon according to those changes.
2016-03-28 06:57:55 -04:00
// These are the settings that Reload changes:
2015-12-10 18:35:10 -05:00
// - Daemon labels.
2016-03-28 06:57:55 -04:00
// - Daemon debug log level.
2016-04-24 22:51:28 -04:00
// - Daemon insecure registries.
2016-05-06 00:45:55 -04:00
// - Daemon max concurrent downloads
// - Daemon max concurrent uploads
2016-02-17 20:08:11 -05:00
// - Cluster discovery (reconfigure and restart).
2016-06-02 14:10:55 -04:00
// - Daemon live restore
2016-05-26 17:07:30 -04:00
// - Daemon shutdown timeout (in seconds).
2016-10-10 12:25:44 -04:00
func ( daemon * Daemon ) Reload ( config * Config ) ( err error ) {
daemon . configStore . reloadLock . Lock ( )
attributes := daemon . platformReload ( config )
2016-05-23 17:49:50 -04:00
defer func ( ) {
2016-10-10 12:25:44 -04:00
// we're unlocking here, because
// LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes()
// holds that lock too.
daemon . configStore . reloadLock . Unlock ( )
2016-05-23 17:49:50 -04:00
if err == nil {
daemon . LogDaemonEventWithAttributes ( "reload" , attributes )
}
} ( )
2016-10-10 12:25:44 -04:00
if err := daemon . reloadClusterDiscovery ( config ) ; err != nil {
2016-05-08 19:11:34 -04:00
return err
}
2016-04-25 09:57:28 -04:00
if config . IsValueSet ( "labels" ) {
2016-02-24 03:13:44 -05:00
daemon . configStore . Labels = config . Labels
}
if config . IsValueSet ( "debug" ) {
daemon . configStore . Debug = config . Debug
}
2016-04-24 22:51:28 -04:00
if config . IsValueSet ( "insecure-registries" ) {
daemon . configStore . InsecureRegistries = config . InsecureRegistries
if err := daemon . RegistryService . LoadInsecureRegistries ( config . InsecureRegistries ) ; err != nil {
return err
}
}
2016-06-02 14:10:55 -04:00
if config . IsValueSet ( "live-restore" ) {
2016-07-27 11:30:15 -04:00
daemon . configStore . LiveRestoreEnabled = config . LiveRestoreEnabled
if err := daemon . containerdRemote . UpdateOptions ( libcontainerd . WithLiveRestore ( config . LiveRestoreEnabled ) ) ; err != nil {
2016-06-02 14:10:55 -04:00
return err
}
}
2016-05-06 00:45:55 -04:00
// If no value is set for max-concurrent-downloads we assume it is the default value
// We always "reset" as the cost is lightweight and easy to maintain.
if config . IsValueSet ( "max-concurrent-downloads" ) && config . MaxConcurrentDownloads != nil {
* daemon . configStore . MaxConcurrentDownloads = * config . MaxConcurrentDownloads
} else {
maxConcurrentDownloads := defaultMaxConcurrentDownloads
daemon . configStore . MaxConcurrentDownloads = & maxConcurrentDownloads
}
logrus . Debugf ( "Reset Max Concurrent Downloads: %d" , * daemon . configStore . MaxConcurrentDownloads )
if daemon . downloadManager != nil {
daemon . downloadManager . SetConcurrency ( * daemon . configStore . MaxConcurrentDownloads )
}
// If no value is set for max-concurrent-upload we assume it is the default value
// We always "reset" as the cost is lightweight and easy to maintain.
if config . IsValueSet ( "max-concurrent-uploads" ) && config . MaxConcurrentUploads != nil {
* daemon . configStore . MaxConcurrentUploads = * config . MaxConcurrentUploads
} else {
maxConcurrentUploads := defaultMaxConcurrentUploads
daemon . configStore . MaxConcurrentUploads = & maxConcurrentUploads
}
logrus . Debugf ( "Reset Max Concurrent Uploads: %d" , * daemon . configStore . MaxConcurrentUploads )
if daemon . uploadManager != nil {
daemon . uploadManager . SetConcurrency ( * daemon . configStore . MaxConcurrentUploads )
}
2016-05-26 17:07:30 -04:00
if config . IsValueSet ( "shutdown-timeout" ) {
daemon . configStore . ShutdownTimeout = config . ShutdownTimeout
logrus . Debugf ( "Reset Shutdown Timeout: %d" , daemon . configStore . ShutdownTimeout )
}
2016-05-08 19:11:34 -04:00
// We emit daemon reload event here with updatable configurations
attributes [ "debug" ] = fmt . Sprintf ( "%t" , daemon . configStore . Debug )
2016-07-24 16:02:45 -04:00
attributes [ "live-restore" ] = fmt . Sprintf ( "%t" , daemon . configStore . LiveRestoreEnabled )
2016-04-24 22:51:28 -04:00
if daemon . configStore . InsecureRegistries != nil {
insecureRegistries , err := json . Marshal ( daemon . configStore . InsecureRegistries )
if err != nil {
return err
}
attributes [ "insecure-registries" ] = string ( insecureRegistries )
} else {
attributes [ "insecure-registries" ] = "[]"
}
2016-05-08 19:11:34 -04:00
attributes [ "cluster-store" ] = daemon . configStore . ClusterStore
if daemon . configStore . ClusterOpts != nil {
2016-04-24 22:51:28 -04:00
opts , err := json . Marshal ( daemon . configStore . ClusterOpts )
if err != nil {
return err
}
2016-05-08 19:11:34 -04:00
attributes [ "cluster-store-opts" ] = string ( opts )
} else {
attributes [ "cluster-store-opts" ] = "{}"
}
attributes [ "cluster-advertise" ] = daemon . configStore . ClusterAdvertise
2016-04-24 22:51:28 -04:00
2016-05-08 19:11:34 -04:00
if daemon . configStore . Labels != nil {
2016-04-24 22:51:28 -04:00
labels , err := json . Marshal ( daemon . configStore . Labels )
if err != nil {
return err
}
2016-05-08 19:11:34 -04:00
attributes [ "labels" ] = string ( labels )
} else {
attributes [ "labels" ] = "[]"
}
2016-04-24 22:51:28 -04:00
2016-05-08 19:15:33 -04:00
attributes [ "max-concurrent-downloads" ] = fmt . Sprintf ( "%d" , * daemon . configStore . MaxConcurrentDownloads )
attributes [ "max-concurrent-uploads" ] = fmt . Sprintf ( "%d" , * daemon . configStore . MaxConcurrentUploads )
2016-05-26 17:07:30 -04:00
attributes [ "shutdown-timeout" ] = fmt . Sprintf ( "%d" , daemon . configStore . ShutdownTimeout )
2016-05-08 19:11:34 -04:00
return nil
2015-12-10 18:35:10 -05:00
}
func ( daemon * Daemon ) reloadClusterDiscovery ( config * Config ) error {
2016-02-24 03:13:44 -05:00
var err error
newAdvertise := daemon . configStore . ClusterAdvertise
newClusterStore := daemon . configStore . ClusterStore
if config . IsValueSet ( "cluster-advertise" ) {
if config . IsValueSet ( "cluster-store" ) {
newClusterStore = config . ClusterStore
}
newAdvertise , err = parseClusterAdvertiseSettings ( newClusterStore , config . ClusterAdvertise )
if err != nil && err != errDiscoveryDisabled {
return err
}
2015-12-10 18:35:10 -05:00
}
2016-06-14 12:13:53 -04:00
if daemon . clusterProvider != nil {
if err := config . isSwarmCompatible ( ) ; err != nil {
return err
}
}
2015-12-10 18:35:10 -05:00
// check discovery modifications
2016-02-24 03:13:44 -05:00
if ! modifiedDiscoverySettings ( daemon . configStore , newAdvertise , newClusterStore , config . ClusterOpts ) {
2015-12-10 18:35:10 -05:00
return nil
}
// enable discovery for the first time if it was not previously enabled
if daemon . discoveryWatcher == nil {
2016-02-24 03:13:44 -05:00
discoveryWatcher , err := initDiscovery ( newClusterStore , newAdvertise , config . ClusterOpts )
2015-12-10 18:35:10 -05:00
if err != nil {
return fmt . Errorf ( "discovery initialization failed (%v)" , err )
}
daemon . discoveryWatcher = discoveryWatcher
} else {
if err == errDiscoveryDisabled {
// disable discovery if it was previously enabled and it's disabled now
daemon . discoveryWatcher . Stop ( )
} else {
// reload discovery
if err = daemon . discoveryWatcher . Reload ( config . ClusterStore , newAdvertise , config . ClusterOpts ) ; err != nil {
return err
}
}
}
2016-02-24 03:13:44 -05:00
daemon . configStore . ClusterStore = newClusterStore
2015-12-10 18:35:10 -05:00
daemon . configStore . ClusterOpts = config . ClusterOpts
daemon . configStore . ClusterAdvertise = newAdvertise
2016-02-17 20:08:11 -05:00
if daemon . netController == nil {
return nil
}
2016-10-07 17:53:17 -04:00
netOptions , err := daemon . networkOptions ( daemon . configStore , daemon . PluginStore , nil )
2016-02-17 20:08:11 -05:00
if err != nil {
2016-09-09 05:04:06 -04:00
logrus . WithError ( err ) . Warnf ( "failed to get options with network controller" )
2016-02-17 20:08:11 -05:00
return nil
}
err = daemon . netController . ReloadConfiguration ( netOptions ... )
if err != nil {
logrus . Warnf ( "Failed to reload configuration with network controller: %v" , err )
}
2015-12-10 18:35:10 -05:00
return nil
}
2016-03-09 23:33:21 -05:00
// isBridgeNetworkDisabled reports whether the configured bridge interface
// is the sentinel value that disables the default bridge network.
func isBridgeNetworkDisabled(config *Config) bool {
	return disableNetworkBridge == config.bridgeConfig.Iface
}
2016-09-26 13:08:52 -04:00
func ( daemon * Daemon ) networkOptions ( dconfig * Config , pg plugingetter . PluginGetter , activeSandboxes map [ string ] interface { } ) ( [ ] nwconfig . Option , error ) {
2016-03-09 23:33:21 -05:00
options := [ ] nwconfig . Option { }
if dconfig == nil {
return options , nil
}
options = append ( options , nwconfig . OptionDataDir ( dconfig . Root ) )
2016-07-21 19:13:10 -04:00
options = append ( options , nwconfig . OptionExecRoot ( dconfig . GetExecRoot ( ) ) )
2016-03-09 23:33:21 -05:00
dd := runconfig . DefaultDaemonNetworkMode ( )
dn := runconfig . DefaultDaemonNetworkMode ( ) . NetworkName ( )
options = append ( options , nwconfig . OptionDefaultDriver ( string ( dd ) ) )
options = append ( options , nwconfig . OptionDefaultNetwork ( dn ) )
if strings . TrimSpace ( dconfig . ClusterStore ) != "" {
kv := strings . Split ( dconfig . ClusterStore , "://" )
if len ( kv ) != 2 {
return nil , fmt . Errorf ( "kv store daemon config must be of the form KV-PROVIDER://KV-URL" )
}
options = append ( options , nwconfig . OptionKVProvider ( kv [ 0 ] ) )
options = append ( options , nwconfig . OptionKVProviderURL ( kv [ 1 ] ) )
}
if len ( dconfig . ClusterOpts ) > 0 {
options = append ( options , nwconfig . OptionKVOpts ( dconfig . ClusterOpts ) )
}
if daemon . discoveryWatcher != nil {
options = append ( options , nwconfig . OptionDiscoveryWatcher ( daemon . discoveryWatcher ) )
}
if dconfig . ClusterAdvertise != "" {
options = append ( options , nwconfig . OptionDiscoveryAddress ( dconfig . ClusterAdvertise ) )
}
options = append ( options , nwconfig . OptionLabels ( dconfig . Labels ) )
options = append ( options , driverOptions ( dconfig ) ... )
2016-06-14 12:13:53 -04:00
2016-07-27 11:30:15 -04:00
if daemon . configStore != nil && daemon . configStore . LiveRestoreEnabled && len ( activeSandboxes ) != 0 {
2016-06-14 12:13:53 -04:00
options = append ( options , nwconfig . OptionActiveSandboxes ( activeSandboxes ) )
}
2016-09-26 13:08:52 -04:00
if pg != nil {
options = append ( options , nwconfig . OptionPluginGetter ( pg ) )
}
2016-03-09 23:33:21 -05:00
return options , nil
}
2016-03-18 14:50:19 -04:00
// copyBlkioEntry converts containerd blkio stat entries into the
// equivalent API types, one output element per input element.
func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
	converted := make([]types.BlkioStatEntry, len(entries))
	for i := range entries {
		src := entries[i]
		converted[i] = types.BlkioStatEntry{
			Major: src.Major,
			Minor: src.Minor,
			Op:    src.Op,
			Value: src.Value,
		}
	}
	return converted
}
2016-10-18 00:36:52 -04:00
// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
	return daemon.cluster
}
// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
	daemon.cluster = cluster
}
2016-11-09 20:49:09 -05:00
// pluginInit initializes the plugin subsystem with the daemon's root,
// plugin store, containerd remote, registry service and event logger.
func (daemon *Daemon) pluginInit(cfg *Config, remote libcontainerd.Remote) error {
	return plugin.Init(cfg.Root, daemon.PluginStore, remote, daemon.RegistryService, cfg.LiveRestoreEnabled, daemon.LogPluginEvent)
}
// pluginShutdown stops the plugin manager if one was ever created.
// During a failed daemon init, Shutdown can run before plugin
// initialization, in which case GetManager returns nil.
func (daemon *Daemon) pluginShutdown() {
	if manager := plugin.GetManager(); manager != nil {
		manager.Shutdown()
	}
}
2016-11-04 15:42:21 -04:00
// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *Config) error {
	// get the canonical path to the Docker root directory
	realRoot := config.Root
	if _, err := os.Stat(config.Root); err == nil || !os.IsNotExist(err) {
		// The root exists (or stat failed for another reason): resolve
		// any symlinks to the canonical directory.
		resolved, err := fileutils.ReadSymlinkedDirectory(config.Root)
		if err != nil {
			return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
		}
		realRoot = resolved
	}

	uidMaps, gidMaps, err := setupRemappedRoot(config)
	if err != nil {
		return err
	}
	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
	if err != nil {
		return err
	}
	return setupDaemonRoot(config, realRoot, rootUID, rootGID)
}