2018-02-05 16:05:59 -05:00
|
|
|
package daemon // import "github.com/docker/docker/daemon"
|
2014-07-31 16:46:18 -04:00
|
|
|
|
|
|
|
import (
|
2017-09-22 09:52:41 -04:00
|
|
|
"context"
|
2015-07-16 18:33:13 -04:00
|
|
|
"runtime"
|
2016-07-20 19:11:28 -04:00
|
|
|
"time"
|
2015-03-25 03:44:12 -04:00
|
|
|
|
2016-09-06 14:18:12 -04:00
|
|
|
"github.com/docker/docker/api/types"
|
|
|
|
containertypes "github.com/docker/docker/api/types/container"
|
2015-11-12 14:55:17 -05:00
|
|
|
"github.com/docker/docker/container"
|
2018-01-11 14:53:06 -05:00
|
|
|
"github.com/docker/docker/errdefs"
|
2017-12-18 16:02:23 -05:00
|
|
|
"github.com/docker/docker/pkg/mount"
|
2017-07-19 10:20:13 -04:00
|
|
|
"github.com/pkg/errors"
|
2017-07-26 17:42:13 -04:00
|
|
|
"github.com/sirupsen/logrus"
|
2014-07-31 16:46:18 -04:00
|
|
|
)
|
|
|
|
|
2015-07-30 17:01:53 -04:00
|
|
|
// ContainerStart starts a container by name or ID.
//
// checkpoint/checkpointDir select a CRIU checkpoint to restore from and are
// only honored when the daemon runs in experimental mode. hostConfig is a
// deprecated legacy parameter: host configuration should be supplied at
// create time, and passing it here is rejected outright on Windows and only
// tolerated (with a deprecation warning) on other platforms.
func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error {
	if checkpoint != "" && !daemon.HasExperimental() {
		return errdefs.InvalidParameter(errors.New("checkpoint is only supported in experimental mode"))
	}

	container, err := daemon.GetContainer(name)
	if err != nil {
		return err
	}

	// validateState checks, under the container lock, that the container is in
	// a state from which a start is permitted. The lock is scoped to this
	// closure (via defer Unlock) so it is released before the heavier work below.
	validateState := func() error {
		container.Lock()
		defer container.Unlock()

		if container.Paused {
			return errdefs.Conflict(errors.New("cannot start a paused container, try unpause instead"))
		}

		if container.Running {
			// Starting an already-running container is not an error; callers
			// receive a "not modified" sentinel instead.
			return containerNotModifiedError{running: true}
		}

		if container.RemovalInProgress || container.Dead {
			return errdefs.Conflict(errors.New("container is marked for removal and cannot be started"))
		}
		return nil
	}

	if err := validateState(); err != nil {
		return err
	}

	// Windows does not have the backwards compatibility issue here.
	if runtime.GOOS != "windows" {
		// This is kept for backward compatibility - hostconfig should be passed when
		// creating a container, not during start.
		if hostConfig != nil {
			logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12")
			// Remember the pre-merge network mode so we can detect a change below.
			oldNetworkMode := container.HostConfig.NetworkMode
			if err := daemon.setSecurityOptions(container, hostConfig); err != nil {
				return errdefs.InvalidParameter(err)
			}
			if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil {
				return errdefs.InvalidParameter(err)
			}
			if err := daemon.setHostConfig(container, hostConfig); err != nil {
				return errdefs.InvalidParameter(err)
			}
			newNetworkMode := container.HostConfig.NetworkMode
			if string(oldNetworkMode) != string(newNetworkMode) {
				// if user has change the network mode on starting, clean up the
				// old networks. It is a deprecated feature and has been removed in Docker 1.12
				container.NetworkSettings.Networks = nil
				// Persist the cleared network settings immediately so the
				// on-disk state matches before the container starts.
				if err := container.CheckpointTo(daemon.containersReplica); err != nil {
					return errdefs.System(err)
				}
			}
			container.InitDNSHostConfig()
		}
	} else {
		if hostConfig != nil {
			return errdefs.InvalidParameter(errors.New("Supplying a hostconfig on start is not supported. It should be supplied on create"))
		}
	}

	// check if hostConfig is in line with the current system settings.
	// It may happen cgroups are umounted or the like.
	if _, err = daemon.verifyContainerSettings(container.OS, container.HostConfig, nil, false); err != nil {
		return errdefs.InvalidParameter(err)
	}
	// Adapt for old containers in case we have updates in this function and
	// old containers never have chance to call the new function in create stage.
	if hostConfig != nil {
		if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil {
			return errdefs.InvalidParameter(err)
		}
	}
	// resetRestartManager=true: a user-initiated start resets any prior
	// restart-policy bookkeeping.
	return daemon.containerStart(container, checkpoint, checkpointDir, true)
}
|
2015-11-02 20:06:09 -05:00
|
|
|
|
|
|
|
// containerStart prepares the container to run by setting up everything the
// container needs, such as storage and networking, as well as links
// between containers. The container is left waiting for a signal to
// begin running.
//
// The container lock is held for the duration of the call. On any failure the
// deferred cleanup records the error on the container, persists its state,
// runs daemon.Cleanup, and honors the AutoRemove flag. resetRestartManager is
// false when called from the restart path, in which case the Running check is
// skipped and restart-manager state is left untouched. The named return err is
// required so the deferred cleanup can observe the outcome.
func (daemon *Daemon) containerStart(container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (err error) {
	start := time.Now()
	container.Lock()
	defer container.Unlock()

	if resetRestartManager && container.Running { // skip this check if already in restarting step and resetRestartManager==false
		return nil
	}

	if container.RemovalInProgress || container.Dead {
		return errdefs.Conflict(errors.New("container is marked for removal and cannot be started"))
	}

	if checkpointDir != "" {
		// TODO(mlaventure): how would we support that?
		return errdefs.Forbidden(errors.New("custom checkpointdir is not supported"))
	}

	// if we encounter an error during start we need to ensure that any other
	// setup has been cleaned up properly
	defer func() {
		if err != nil {
			container.SetError(err)
			// if no one else has set it, make sure we don't leave it at zero
			if container.ExitCode() == 0 {
				container.SetExitCode(128)
			}
			// Best effort: persist the failed state; log (don't fail) on error.
			if err := container.CheckpointTo(daemon.containersReplica); err != nil {
				logrus.Errorf("%s: failed saving state on start failure: %v", container.ID, err)
			}
			container.Reset(false)

			daemon.Cleanup(container)
			// if containers AutoRemove flag is set, remove it after clean up
			if container.HostConfig.AutoRemove {
				// ContainerRm takes its own locks, so drop ours around the
				// call and reacquire it before the outer defer unlocks.
				container.Unlock()
				if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
					logrus.Errorf("can't remove container %s: %v", container.ID, err)
				}
				container.Lock()
			}
		}
	}()

	if err := daemon.conditionalMountOnStart(container); err != nil {
		return err
	}

	if err := daemon.initializeNetworking(container); err != nil {
		return err
	}

	// Build the OCI runtime spec for this container.
	spec, err := daemon.createSpec(container)
	if err != nil {
		return errdefs.System(err)
	}

	if resetRestartManager {
		container.ResetRestartManager(true)
		container.HasBeenManuallyStopped = false
	}

	if err := daemon.saveApparmorConfig(container); err != nil {
		return err
	}

	if checkpoint != "" {
		// Resolve (and validate) the directory holding the named checkpoint.
		checkpointDir, err = getCheckpointDir(checkpointDir, checkpoint, container.Name, container.ID, container.CheckpointDir(), false)
		if err != nil {
			return err
		}
	}

	createOptions, err := daemon.getLibcontainerdCreateOptions(container)
	if err != nil {
		return err
	}

	ctx := context.TODO()

	err = daemon.containerd.Create(ctx, container.ID, spec, createOptions)
	if err != nil {
		// A conflict means a stale containerd object survived a previous run;
		// delete it and retry the create once.
		if errdefs.IsConflict(err) {
			logrus.WithError(err).WithField("container", container.ID).Error("Container not cleaned up from containerd from previous run")
			// best effort to clean up old container object
			daemon.containerd.DeleteTask(ctx, container.ID)
			if err := daemon.containerd.Delete(ctx, container.ID); err != nil && !errdefs.IsNotFound(err) {
				logrus.WithError(err).WithField("container", container.ID).Error("Error cleaning up stale containerd container object")
			}
			err = daemon.containerd.Create(ctx, container.ID, spec, createOptions)
		}
		if err != nil {
			return translateContainerdStartErr(container.Path, container.SetExitCode, err)
		}
	}

	// TODO(mlaventure): we need to specify checkpoint options here
	pid, err := daemon.containerd.Start(context.Background(), container.ID, checkpointDir,
		container.StreamConfig.Stdin() != nil || container.Config.Tty,
		container.InitializeStdio)
	if err != nil {
		// Start failed after a successful Create: remove the containerd
		// object so it doesn't linger; the error itself is only logged.
		if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil {
			logrus.WithError(err).WithField("container", container.ID).
				Error("failed to delete failed start container")
		}
		return translateContainerdStartErr(container.Path, container.SetExitCode, err)
	}

	container.SetRunning(pid, true)
	container.HasBeenStartedBefore = true
	daemon.setStateCounter(container)

	daemon.initHealthMonitor(container)

	// Persisting the new state is best effort; the container is already running.
	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
		logrus.WithError(err).WithField("container", container.ID).
			Errorf("failed to store container")
	}

	daemon.LogContainerEvent(container, "start")
	// Record start latency in the "start" action metric.
	containerActions.WithValues("start").UpdateSince(start)

	return nil
}
|
|
|
|
|
2015-11-03 12:43:36 -05:00
|
|
|
// Cleanup releases any network resources allocated to the container along with any rules
// around how containers are linked together. It also unmounts the container's root filesystem.
//
// Cleanup is best-effort: each step logs a warning/error on failure and
// continues, so a partially-failed teardown still releases as much as possible.
// It never returns an error.
func (daemon *Daemon) Cleanup(container *container.Container) {
	daemon.releaseNetwork(container)

	if err := container.UnmountIpcMount(); err != nil {
		logrus.Warnf("%s cleanup: failed to unmount IPC: %s", container.ID, err)
	}

	if err := daemon.conditionalUnmountOnCleanup(container); err != nil {
		// FIXME: remove once reference counting for graphdrivers has been refactored
		// Ensure that all the mounts are gone
		if mountid, err := daemon.imageService.GetLayerMountID(container.ID, container.OS); err == nil {
			daemon.cleanupMountsByID(mountid)
		}
	}

	if err := container.UnmountSecrets(); err != nil {
		logrus.Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err)
	}

	// Unmount anything still mounted beneath the container's root directory.
	if err := mount.RecursiveUnmount(container.Root); err != nil {
		logrus.WithError(err).WithField("container", container.ID).Warn("Error while cleaning up container resource mounts.")
	}

	// Drop every exec command registered for this container.
	for _, eConfig := range container.ExecCommands.Commands() {
		daemon.unregisterExecCommand(container, eConfig)
	}

	// Only unmount volumes if the container's base filesystem was ever mounted.
	if container.BaseFS != nil && container.BaseFS.Path() != "" {
		if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil {
			logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
		}
	}

	// Wake up anyone blocked attaching to this container.
	container.CancelAttachContext()

	if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil {
		logrus.Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err)
	}
}
|