2021-08-23 09:14:53 -04:00
|
|
|
//go:build !windows
|
2018-05-10 16:47:12 -04:00
|
|
|
// +build !windows
|
2015-11-12 14:55:17 -05:00
|
|
|
|
2018-02-05 16:05:59 -05:00
|
|
|
package container // import "github.com/docker/docker/container"
|
2015-11-12 14:55:17 -05:00
|
|
|
|
|
|
|
import (
|
|
|
|
"os"
|
2018-01-17 10:49:58 -05:00
|
|
|
"path/filepath"
|
2018-12-04 01:07:06 -05:00
|
|
|
"syscall"
|
2015-11-12 14:55:17 -05:00
|
|
|
|
2018-02-12 15:27:34 -05:00
|
|
|
"github.com/containerd/continuity/fs"
|
2017-02-22 12:11:10 -05:00
|
|
|
"github.com/docker/docker/api/types"
|
2016-09-06 14:18:12 -04:00
|
|
|
containertypes "github.com/docker/docker/api/types/container"
|
2016-09-22 16:14:15 -04:00
|
|
|
mounttypes "github.com/docker/docker/api/types/mount"
|
2018-01-17 10:49:58 -05:00
|
|
|
swarmtypes "github.com/docker/docker/api/types/swarm"
|
2016-03-07 21:41:44 -05:00
|
|
|
"github.com/docker/docker/pkg/stringid"
|
2015-11-12 14:55:17 -05:00
|
|
|
"github.com/docker/docker/volume"
|
2018-04-17 16:50:28 -04:00
|
|
|
volumemounts "github.com/docker/docker/volume/mounts"
|
2020-03-13 19:38:24 -04:00
|
|
|
"github.com/moby/sys/mount"
|
2017-04-18 09:26:36 -04:00
|
|
|
"github.com/opencontainers/selinux/go-selinux/label"
|
2018-12-12 11:48:48 -05:00
|
|
|
"github.com/pkg/errors"
|
2017-07-26 17:42:13 -04:00
|
|
|
"github.com/sirupsen/logrus"
|
2015-11-12 14:55:17 -05:00
|
|
|
)
|
|
|
|
|
2016-10-19 12:22:02 -04:00
|
|
|
const (
	// defaultStopSignal is the default syscall signal used to stop a container.
	defaultStopSignal = "SIGTERM"

	// defaultStopTimeout sets the default time, in seconds, to wait
	// for the graceful container stop before forcefully terminating it.
	defaultStopTimeout = 10

	// containerConfigMountPath is the base path inside the container under
	// which config files are mounted (presumably swarm configs — confirm
	// against the swarm config mounting code).
	containerConfigMountPath = "/"
	// containerSecretMountPath is the path inside the container under which
	// secrets are mounted.
	containerSecretMountPath = "/run/secrets"
)
|
2015-11-12 14:55:17 -05:00
|
|
|
|
|
|
|
// TrySetNetworkMount attempts to set the network mounts given a provided destination and
|
|
|
|
// the path to use for it; return true if the given destination was a network mount file
|
|
|
|
func (container *Container) TrySetNetworkMount(destination string, path string) bool {
|
|
|
|
if destination == "/etc/resolv.conf" {
|
|
|
|
container.ResolvConfPath = path
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if destination == "/etc/hostname" {
|
|
|
|
container.HostnamePath = path
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if destination == "/etc/hosts" {
|
|
|
|
container.HostsPath = path
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// BuildHostnameFile writes the container's hostname file.
|
|
|
|
func (container *Container) BuildHostnameFile() error {
|
|
|
|
hostnamePath, err := container.GetRootResourcePath("hostname")
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
container.HostnamePath = hostnamePath
|
2021-08-24 06:10:50 -04:00
|
|
|
return os.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// NetworkMounts returns the list of network mounts.
|
2016-03-18 14:50:19 -04:00
|
|
|
func (container *Container) NetworkMounts() []Mount {
|
|
|
|
var mounts []Mount
|
2015-11-12 14:55:17 -05:00
|
|
|
shared := container.HostConfig.NetworkMode.IsContainer()
|
2021-06-11 15:01:18 -04:00
|
|
|
parser := volumemounts.NewParser()
|
2015-11-12 14:55:17 -05:00
|
|
|
if container.ResolvConfPath != "" {
|
|
|
|
if _, err := os.Stat(container.ResolvConfPath); err != nil {
|
|
|
|
logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err)
|
|
|
|
} else {
|
|
|
|
writable := !container.HostConfig.ReadonlyRootfs
|
|
|
|
if m, exists := container.MountPoints["/etc/resolv.conf"]; exists {
|
|
|
|
writable = m.RW
|
2017-11-10 18:41:02 -05:00
|
|
|
} else {
|
|
|
|
label.Relabel(container.ResolvConfPath, container.MountLabel, shared)
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
2016-03-18 14:50:19 -04:00
|
|
|
mounts = append(mounts, Mount{
|
2015-11-12 14:55:17 -05:00
|
|
|
Source: container.ResolvConfPath,
|
|
|
|
Destination: "/etc/resolv.conf",
|
|
|
|
Writable: writable,
|
2017-08-01 13:32:44 -04:00
|
|
|
Propagation: string(parser.DefaultPropagationMode()),
|
2015-11-12 14:55:17 -05:00
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if container.HostnamePath != "" {
|
|
|
|
if _, err := os.Stat(container.HostnamePath); err != nil {
|
|
|
|
logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err)
|
|
|
|
} else {
|
|
|
|
writable := !container.HostConfig.ReadonlyRootfs
|
|
|
|
if m, exists := container.MountPoints["/etc/hostname"]; exists {
|
|
|
|
writable = m.RW
|
2017-11-10 18:41:02 -05:00
|
|
|
} else {
|
|
|
|
label.Relabel(container.HostnamePath, container.MountLabel, shared)
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
2016-03-18 14:50:19 -04:00
|
|
|
mounts = append(mounts, Mount{
|
2015-11-12 14:55:17 -05:00
|
|
|
Source: container.HostnamePath,
|
|
|
|
Destination: "/etc/hostname",
|
|
|
|
Writable: writable,
|
2017-08-01 13:32:44 -04:00
|
|
|
Propagation: string(parser.DefaultPropagationMode()),
|
2015-11-12 14:55:17 -05:00
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if container.HostsPath != "" {
|
|
|
|
if _, err := os.Stat(container.HostsPath); err != nil {
|
|
|
|
logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err)
|
|
|
|
} else {
|
|
|
|
writable := !container.HostConfig.ReadonlyRootfs
|
|
|
|
if m, exists := container.MountPoints["/etc/hosts"]; exists {
|
|
|
|
writable = m.RW
|
2017-11-10 18:41:02 -05:00
|
|
|
} else {
|
|
|
|
label.Relabel(container.HostsPath, container.MountLabel, shared)
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
2016-03-18 14:50:19 -04:00
|
|
|
mounts = append(mounts, Mount{
|
2015-11-12 14:55:17 -05:00
|
|
|
Source: container.HostsPath,
|
|
|
|
Destination: "/etc/hosts",
|
|
|
|
Writable: writable,
|
2017-08-01 13:32:44 -04:00
|
|
|
Propagation: string(parser.DefaultPropagationMode()),
|
2015-11-12 14:55:17 -05:00
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return mounts
|
|
|
|
}
|
|
|
|
|
|
|
|
// CopyImagePathContent copies files in destination to the volume.
|
|
|
|
func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error {
|
2017-08-03 20:22:00 -04:00
|
|
|
rootfs, err := container.GetResourcePath(destination)
|
2015-11-12 14:55:17 -05:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-03-22 17:11:03 -04:00
|
|
|
if _, err := os.Stat(rootfs); err != nil {
|
2015-11-12 14:55:17 -05:00
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-06-07 06:21:18 -04:00
|
|
|
id := stringid.GenerateRandomID()
|
2016-03-07 21:41:44 -05:00
|
|
|
path, err := v.Mount(id)
|
2015-11-12 14:55:17 -05:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-03-07 21:41:44 -05:00
|
|
|
|
|
|
|
defer func() {
|
|
|
|
if err := v.Unmount(id); err != nil {
|
|
|
|
logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err)
|
|
|
|
}
|
|
|
|
}()
|
2020-05-10 10:19:42 -04:00
|
|
|
if err := label.Relabel(path, container.MountLabel, true); err != nil && !errors.Is(err, syscall.ENOTSUP) {
|
2016-05-26 11:11:28 -04:00
|
|
|
return err
|
|
|
|
}
|
2016-03-14 23:31:42 -04:00
|
|
|
return copyExistingContents(rootfs, path)
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// ShmResourcePath returns path to shm
|
|
|
|
func (container *Container) ShmResourcePath() (string, error) {
|
2017-12-18 16:02:23 -05:00
|
|
|
return container.MountsResourcePath("shm")
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// HasMountFor checks if path is a mountpoint
|
|
|
|
func (container *Container) HasMountFor(path string) bool {
|
|
|
|
_, exists := container.MountPoints[path]
|
2017-11-10 18:43:57 -05:00
|
|
|
if exists {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// Also search among the tmpfs mounts
|
|
|
|
for dest := range container.HostConfig.Tmpfs {
|
|
|
|
if dest == path {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
|
|
|
|
2018-10-24 20:29:03 -04:00
|
|
|
// UnmountIpcMount unmounts shm if it was mounted
|
|
|
|
func (container *Container) UnmountIpcMount() error {
|
Implement none, private, and shareable ipc modes
Since the commit d88fe447df0e8 ("Add support for sharing /dev/shm/ and
/dev/mqueue between containers") container's /dev/shm is mounted on the
host first, then bind-mounted inside the container. This is done that
way in order to be able to share this container's IPC namespace
(and the /dev/shm mount point) with another container.
Unfortunately, this functionality breaks container checkpoint/restore
(even if IPC is not shared). Since /dev/shm is an external mount, its
contents is not saved by `criu checkpoint`, and so upon restore any
application that tries to access data under /dev/shm is severily
disappointed (which usually results in a fatal crash).
This commit solves the issue by introducing new IPC modes for containers
(in addition to 'host' and 'container:ID'). The new modes are:
- 'shareable': enables sharing this container's IPC with others
(this used to be the implicit default);
- 'private': disables sharing this container's IPC.
In 'private' mode, container's /dev/shm is truly mounted inside the
container, without any bind-mounting from the host, which solves the
issue.
While at it, let's also implement 'none' mode. The motivation, as
eloquently put by Justin Cormack, is:
> I wondered a while back about having a none shm mode, as currently it is
> not possible to have a totally unwriteable container as there is always
> a /dev/shm writeable mount. It is a bit of a niche case (and clearly
> should never be allowed to be daemon default) but it would be trivial to
> add now so maybe we should...
...so here's yet yet another mode:
- 'none': no /dev/shm mount inside the container (though it still
has its own private IPC namespace).
Now, to ultimately solve the abovementioned checkpoint/restore issue, we'd
need to make 'private' the default mode, but unfortunately it breaks the
backward compatibility. So, let's make the default container IPC mode
per-daemon configurable (with the built-in default set to 'shareable'
for now). The default can be changed either via a daemon CLI option
(--default-shm-mode) or a daemon.json configuration file parameter
of the same name.
Note one can only set either 'shareable' or 'private' IPC modes as a
daemon default (i.e. in this context 'host', 'container', or 'none'
do not make much sense).
Some other changes this patch introduces are:
1. A mount for /dev/shm is added to default OCI Linux spec.
2. IpcMode.Valid() is simplified to remove duplicated code that parsed
'container:ID' form. Note the old version used to check that ID does
not contain a semicolon -- this is no longer the case (tests are
modified accordingly). The motivation is we should either do a
proper check for container ID validity, or don't check it at all
(since it is checked in other places anyway). I chose the latter.
3. IpcMode.Container() is modified to not return container ID if the
mode value does not start with "container:", unifying the check to
be the same as in IpcMode.IsContainer().
3. IPC mode unit tests (runconfig/hostconfig_test.go) are modified
to add checks for newly added values.
[v2: addressed review at https://github.com/moby/moby/pull/34087#pullrequestreview-51345997]
[v3: addressed review at https://github.com/moby/moby/pull/34087#pullrequestreview-53902833]
[v4: addressed the case of upgrading from older daemon, in this case
container.HostConfig.IpcMode is unset and this is valid]
[v5: document old and new IpcMode values in api/swagger.yaml]
[v6: add the 'none' mode, changelog entry to docs/api/version-history.md]
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2017-06-27 17:58:50 -04:00
|
|
|
if container.HasMountFor("/dev/shm") {
|
|
|
|
return nil
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
|
|
|
|
Implement none, private, and shareable ipc modes
Since the commit d88fe447df0e8 ("Add support for sharing /dev/shm/ and
/dev/mqueue between containers") container's /dev/shm is mounted on the
host first, then bind-mounted inside the container. This is done that
way in order to be able to share this container's IPC namespace
(and the /dev/shm mount point) with another container.
Unfortunately, this functionality breaks container checkpoint/restore
(even if IPC is not shared). Since /dev/shm is an external mount, its
contents is not saved by `criu checkpoint`, and so upon restore any
application that tries to access data under /dev/shm is severily
disappointed (which usually results in a fatal crash).
This commit solves the issue by introducing new IPC modes for containers
(in addition to 'host' and 'container:ID'). The new modes are:
- 'shareable': enables sharing this container's IPC with others
(this used to be the implicit default);
- 'private': disables sharing this container's IPC.
In 'private' mode, container's /dev/shm is truly mounted inside the
container, without any bind-mounting from the host, which solves the
issue.
While at it, let's also implement 'none' mode. The motivation, as
eloquently put by Justin Cormack, is:
> I wondered a while back about having a none shm mode, as currently it is
> not possible to have a totally unwriteable container as there is always
> a /dev/shm writeable mount. It is a bit of a niche case (and clearly
> should never be allowed to be daemon default) but it would be trivial to
> add now so maybe we should...
...so here's yet yet another mode:
- 'none': no /dev/shm mount inside the container (though it still
has its own private IPC namespace).
Now, to ultimately solve the abovementioned checkpoint/restore issue, we'd
need to make 'private' the default mode, but unfortunately it breaks the
backward compatibility. So, let's make the default container IPC mode
per-daemon configurable (with the built-in default set to 'shareable'
for now). The default can be changed either via a daemon CLI option
(--default-shm-mode) or a daemon.json configuration file parameter
of the same name.
Note one can only set either 'shareable' or 'private' IPC modes as a
daemon default (i.e. in this context 'host', 'container', or 'none'
do not make much sense).
Some other changes this patch introduces are:
1. A mount for /dev/shm is added to default OCI Linux spec.
2. IpcMode.Valid() is simplified to remove duplicated code that parsed
'container:ID' form. Note the old version used to check that ID does
not contain a semicolon -- this is no longer the case (tests are
modified accordingly). The motivation is we should either do a
proper check for container ID validity, or don't check it at all
(since it is checked in other places anyway). I chose the latter.
3. IpcMode.Container() is modified to not return container ID if the
mode value does not start with "container:", unifying the check to
be the same as in IpcMode.IsContainer().
3. IPC mode unit tests (runconfig/hostconfig_test.go) are modified
to add checks for newly added values.
[v2: addressed review at https://github.com/moby/moby/pull/34087#pullrequestreview-51345997]
[v3: addressed review at https://github.com/moby/moby/pull/34087#pullrequestreview-53902833]
[v4: addressed the case of upgrading from older daemon, in this case
container.HostConfig.IpcMode is unset and this is valid]
[v5: document old and new IpcMode values in api/swagger.yaml]
[v6: add the 'none' mode, changelog entry to docs/api/version-history.md]
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2017-06-27 17:58:50 -04:00
|
|
|
// container.ShmPath should not be used here as it may point
|
|
|
|
// to the host's or other container's /dev/shm
|
|
|
|
shmPath, err := container.ShmResourcePath()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
Implement none, private, and shareable ipc modes
Since the commit d88fe447df0e8 ("Add support for sharing /dev/shm/ and
/dev/mqueue between containers") container's /dev/shm is mounted on the
host first, then bind-mounted inside the container. This is done that
way in order to be able to share this container's IPC namespace
(and the /dev/shm mount point) with another container.
Unfortunately, this functionality breaks container checkpoint/restore
(even if IPC is not shared). Since /dev/shm is an external mount, its
contents is not saved by `criu checkpoint`, and so upon restore any
application that tries to access data under /dev/shm is severily
disappointed (which usually results in a fatal crash).
This commit solves the issue by introducing new IPC modes for containers
(in addition to 'host' and 'container:ID'). The new modes are:
- 'shareable': enables sharing this container's IPC with others
(this used to be the implicit default);
- 'private': disables sharing this container's IPC.
In 'private' mode, container's /dev/shm is truly mounted inside the
container, without any bind-mounting from the host, which solves the
issue.
While at it, let's also implement 'none' mode. The motivation, as
eloquently put by Justin Cormack, is:
> I wondered a while back about having a none shm mode, as currently it is
> not possible to have a totally unwriteable container as there is always
> a /dev/shm writeable mount. It is a bit of a niche case (and clearly
> should never be allowed to be daemon default) but it would be trivial to
> add now so maybe we should...
...so here's yet yet another mode:
- 'none': no /dev/shm mount inside the container (though it still
has its own private IPC namespace).
Now, to ultimately solve the abovementioned checkpoint/restore issue, we'd
need to make 'private' the default mode, but unfortunately it breaks the
backward compatibility. So, let's make the default container IPC mode
per-daemon configurable (with the built-in default set to 'shareable'
for now). The default can be changed either via a daemon CLI option
(--default-shm-mode) or a daemon.json configuration file parameter
of the same name.
Note one can only set either 'shareable' or 'private' IPC modes as a
daemon default (i.e. in this context 'host', 'container', or 'none'
do not make much sense).
Some other changes this patch introduces are:
1. A mount for /dev/shm is added to default OCI Linux spec.
2. IpcMode.Valid() is simplified to remove duplicated code that parsed
'container:ID' form. Note the old version used to check that ID does
not contain a semicolon -- this is no longer the case (tests are
modified accordingly). The motivation is we should either do a
proper check for container ID validity, or don't check it at all
(since it is checked in other places anyway). I chose the latter.
3. IpcMode.Container() is modified to not return container ID if the
mode value does not start with "container:", unifying the check to
be the same as in IpcMode.IsContainer().
3. IPC mode unit tests (runconfig/hostconfig_test.go) are modified
to add checks for newly added values.
[v2: addressed review at https://github.com/moby/moby/pull/34087#pullrequestreview-51345997]
[v3: addressed review at https://github.com/moby/moby/pull/34087#pullrequestreview-53902833]
[v4: addressed the case of upgrading from older daemon, in this case
container.HostConfig.IpcMode is unset and this is valid]
[v5: document old and new IpcMode values in api/swagger.yaml]
[v6: add the 'none' mode, changelog entry to docs/api/version-history.md]
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2017-06-27 17:58:50 -04:00
|
|
|
if shmPath == "" {
|
|
|
|
return nil
|
|
|
|
}
|
2020-04-17 06:01:01 -04:00
|
|
|
if err = mount.Unmount(shmPath); err != nil && !errors.Is(err, os.ErrNotExist) {
|
2018-10-22 21:30:34 -04:00
|
|
|
return err
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
Implement none, private, and shareable ipc modes
Since the commit d88fe447df0e8 ("Add support for sharing /dev/shm/ and
/dev/mqueue between containers") container's /dev/shm is mounted on the
host first, then bind-mounted inside the container. This is done that
way in order to be able to share this container's IPC namespace
(and the /dev/shm mount point) with another container.
Unfortunately, this functionality breaks container checkpoint/restore
(even if IPC is not shared). Since /dev/shm is an external mount, its
contents is not saved by `criu checkpoint`, and so upon restore any
application that tries to access data under /dev/shm is severily
disappointed (which usually results in a fatal crash).
This commit solves the issue by introducing new IPC modes for containers
(in addition to 'host' and 'container:ID'). The new modes are:
- 'shareable': enables sharing this container's IPC with others
(this used to be the implicit default);
- 'private': disables sharing this container's IPC.
In 'private' mode, container's /dev/shm is truly mounted inside the
container, without any bind-mounting from the host, which solves the
issue.
While at it, let's also implement 'none' mode. The motivation, as
eloquently put by Justin Cormack, is:
> I wondered a while back about having a none shm mode, as currently it is
> not possible to have a totally unwriteable container as there is always
> a /dev/shm writeable mount. It is a bit of a niche case (and clearly
> should never be allowed to be daemon default) but it would be trivial to
> add now so maybe we should...
...so here's yet yet another mode:
- 'none': no /dev/shm mount inside the container (though it still
has its own private IPC namespace).
Now, to ultimately solve the abovementioned checkpoint/restore issue, we'd
need to make 'private' the default mode, but unfortunately it breaks the
backward compatibility. So, let's make the default container IPC mode
per-daemon configurable (with the built-in default set to 'shareable'
for now). The default can be changed either via a daemon CLI option
(--default-shm-mode) or a daemon.json configuration file parameter
of the same name.
Note one can only set either 'shareable' or 'private' IPC modes as a
daemon default (i.e. in this context 'host', 'container', or 'none'
do not make much sense).
Some other changes this patch introduces are:
1. A mount for /dev/shm is added to default OCI Linux spec.
2. IpcMode.Valid() is simplified to remove duplicated code that parsed
'container:ID' form. Note the old version used to check that ID does
not contain a semicolon -- this is no longer the case (tests are
modified accordingly). The motivation is we should either do a
proper check for container ID validity, or don't check it at all
(since it is checked in other places anyway). I chose the latter.
3. IpcMode.Container() is modified to not return container ID if the
mode value does not start with "container:", unifying the check to
be the same as in IpcMode.IsContainer().
3. IPC mode unit tests (runconfig/hostconfig_test.go) are modified
to add checks for newly added values.
[v2: addressed review at https://github.com/moby/moby/pull/34087#pullrequestreview-51345997]
[v3: addressed review at https://github.com/moby/moby/pull/34087#pullrequestreview-53902833]
[v4: addressed the case of upgrading from older daemon, in this case
container.HostConfig.IpcMode is unset and this is valid]
[v5: document old and new IpcMode values in api/swagger.yaml]
[v6: add the 'none' mode, changelog entry to docs/api/version-history.md]
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2017-06-27 17:58:50 -04:00
|
|
|
return nil
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// IpcMounts returns the list of IPC mounts
|
2016-03-18 14:50:19 -04:00
|
|
|
func (container *Container) IpcMounts() []Mount {
|
|
|
|
var mounts []Mount
|
2021-06-11 15:01:18 -04:00
|
|
|
parser := volumemounts.NewParser()
|
2015-11-12 14:55:17 -05:00
|
|
|
|
Implement none, private, and shareable ipc modes
Since the commit d88fe447df0e8 ("Add support for sharing /dev/shm/ and
/dev/mqueue between containers") container's /dev/shm is mounted on the
host first, then bind-mounted inside the container. This is done that
way in order to be able to share this container's IPC namespace
(and the /dev/shm mount point) with another container.
Unfortunately, this functionality breaks container checkpoint/restore
(even if IPC is not shared). Since /dev/shm is an external mount, its
contents is not saved by `criu checkpoint`, and so upon restore any
application that tries to access data under /dev/shm is severily
disappointed (which usually results in a fatal crash).
This commit solves the issue by introducing new IPC modes for containers
(in addition to 'host' and 'container:ID'). The new modes are:
- 'shareable': enables sharing this container's IPC with others
(this used to be the implicit default);
- 'private': disables sharing this container's IPC.
In 'private' mode, container's /dev/shm is truly mounted inside the
container, without any bind-mounting from the host, which solves the
issue.
While at it, let's also implement 'none' mode. The motivation, as
eloquently put by Justin Cormack, is:
> I wondered a while back about having a none shm mode, as currently it is
> not possible to have a totally unwriteable container as there is always
> a /dev/shm writeable mount. It is a bit of a niche case (and clearly
> should never be allowed to be daemon default) but it would be trivial to
> add now so maybe we should...
...so here's yet yet another mode:
- 'none': no /dev/shm mount inside the container (though it still
has its own private IPC namespace).
Now, to ultimately solve the abovementioned checkpoint/restore issue, we'd
need to make 'private' the default mode, but unfortunately it breaks the
backward compatibility. So, let's make the default container IPC mode
per-daemon configurable (with the built-in default set to 'shareable'
for now). The default can be changed either via a daemon CLI option
(--default-shm-mode) or a daemon.json configuration file parameter
of the same name.
Note one can only set either 'shareable' or 'private' IPC modes as a
daemon default (i.e. in this context 'host', 'container', or 'none'
do not make much sense).
Some other changes this patch introduces are:
1. A mount for /dev/shm is added to default OCI Linux spec.
2. IpcMode.Valid() is simplified to remove duplicated code that parsed
'container:ID' form. Note the old version used to check that ID does
not contain a semicolon -- this is no longer the case (tests are
modified accordingly). The motivation is we should either do a
proper check for container ID validity, or don't check it at all
(since it is checked in other places anyway). I chose the latter.
3. IpcMode.Container() is modified to not return container ID if the
mode value does not start with "container:", unifying the check to
be the same as in IpcMode.IsContainer().
3. IPC mode unit tests (runconfig/hostconfig_test.go) are modified
to add checks for newly added values.
[v2: addressed review at https://github.com/moby/moby/pull/34087#pullrequestreview-51345997]
[v3: addressed review at https://github.com/moby/moby/pull/34087#pullrequestreview-53902833]
[v4: addressed the case of upgrading from older daemon, in this case
container.HostConfig.IpcMode is unset and this is valid]
[v5: document old and new IpcMode values in api/swagger.yaml]
[v6: add the 'none' mode, changelog entry to docs/api/version-history.md]
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2017-06-27 17:58:50 -04:00
|
|
|
if container.HasMountFor("/dev/shm") {
|
|
|
|
return mounts
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
Implement none, private, and shareable ipc modes
Since the commit d88fe447df0e8 ("Add support for sharing /dev/shm/ and
/dev/mqueue between containers") container's /dev/shm is mounted on the
host first, then bind-mounted inside the container. This is done that
way in order to be able to share this container's IPC namespace
(and the /dev/shm mount point) with another container.
Unfortunately, this functionality breaks container checkpoint/restore
(even if IPC is not shared). Since /dev/shm is an external mount, its
contents is not saved by `criu checkpoint`, and so upon restore any
application that tries to access data under /dev/shm is severily
disappointed (which usually results in a fatal crash).
This commit solves the issue by introducing new IPC modes for containers
(in addition to 'host' and 'container:ID'). The new modes are:
- 'shareable': enables sharing this container's IPC with others
(this used to be the implicit default);
- 'private': disables sharing this container's IPC.
In 'private' mode, container's /dev/shm is truly mounted inside the
container, without any bind-mounting from the host, which solves the
issue.
While at it, let's also implement 'none' mode. The motivation, as
eloquently put by Justin Cormack, is:
> I wondered a while back about having a none shm mode, as currently it is
> not possible to have a totally unwriteable container as there is always
> a /dev/shm writeable mount. It is a bit of a niche case (and clearly
> should never be allowed to be daemon default) but it would be trivial to
> add now so maybe we should...
...so here's yet yet another mode:
- 'none': no /dev/shm mount inside the container (though it still
has its own private IPC namespace).
Now, to ultimately solve the abovementioned checkpoint/restore issue, we'd
need to make 'private' the default mode, but unfortunately it breaks the
backward compatibility. So, let's make the default container IPC mode
per-daemon configurable (with the built-in default set to 'shareable'
for now). The default can be changed either via a daemon CLI option
(--default-shm-mode) or a daemon.json configuration file parameter
of the same name.
Note one can only set either 'shareable' or 'private' IPC modes as a
daemon default (i.e. in this context 'host', 'container', or 'none'
do not make much sense).
Some other changes this patch introduces are:
1. A mount for /dev/shm is added to default OCI Linux spec.
2. IpcMode.Valid() is simplified to remove duplicated code that parsed
'container:ID' form. Note the old version used to check that ID does
not contain a semicolon -- this is no longer the case (tests are
modified accordingly). The motivation is we should either do a
proper check for container ID validity, or don't check it at all
(since it is checked in other places anyway). I chose the latter.
3. IpcMode.Container() is modified to not return container ID if the
mode value does not start with "container:", unifying the check to
be the same as in IpcMode.IsContainer().
3. IPC mode unit tests (runconfig/hostconfig_test.go) are modified
to add checks for newly added values.
[v2: addressed review at https://github.com/moby/moby/pull/34087#pullrequestreview-51345997]
[v3: addressed review at https://github.com/moby/moby/pull/34087#pullrequestreview-53902833]
[v4: addressed the case of upgrading from older daemon, in this case
container.HostConfig.IpcMode is unset and this is valid]
[v5: document old and new IpcMode values in api/swagger.yaml]
[v6: add the 'none' mode, changelog entry to docs/api/version-history.md]
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2017-06-27 17:58:50 -04:00
|
|
|
if container.ShmPath == "" {
|
|
|
|
return mounts
|
|
|
|
}
|
|
|
|
|
|
|
|
label.SetFileLabel(container.ShmPath, container.MountLabel)
|
|
|
|
mounts = append(mounts, Mount{
|
|
|
|
Source: container.ShmPath,
|
|
|
|
Destination: "/dev/shm",
|
|
|
|
Writable: true,
|
2017-08-01 13:32:44 -04:00
|
|
|
Propagation: string(parser.DefaultPropagationMode()),
|
Implement none, private, and shareable ipc modes
Since the commit d88fe447df0e8 ("Add support for sharing /dev/shm/ and
/dev/mqueue between containers") container's /dev/shm is mounted on the
host first, then bind-mounted inside the container. This is done that
way in order to be able to share this container's IPC namespace
(and the /dev/shm mount point) with another container.
Unfortunately, this functionality breaks container checkpoint/restore
(even if IPC is not shared). Since /dev/shm is an external mount, its
contents is not saved by `criu checkpoint`, and so upon restore any
application that tries to access data under /dev/shm is severily
disappointed (which usually results in a fatal crash).
This commit solves the issue by introducing new IPC modes for containers
(in addition to 'host' and 'container:ID'). The new modes are:
- 'shareable': enables sharing this container's IPC with others
(this used to be the implicit default);
- 'private': disables sharing this container's IPC.
In 'private' mode, container's /dev/shm is truly mounted inside the
container, without any bind-mounting from the host, which solves the
issue.
While at it, let's also implement 'none' mode. The motivation, as
eloquently put by Justin Cormack, is:
> I wondered a while back about having a none shm mode, as currently it is
> not possible to have a totally unwriteable container as there is always
> a /dev/shm writeable mount. It is a bit of a niche case (and clearly
> should never be allowed to be daemon default) but it would be trivial to
> add now so maybe we should...
...so here's yet yet another mode:
- 'none': no /dev/shm mount inside the container (though it still
has its own private IPC namespace).
Now, to ultimately solve the abovementioned checkpoint/restore issue, we'd
need to make 'private' the default mode, but unfortunately it breaks the
backward compatibility. So, let's make the default container IPC mode
per-daemon configurable (with the built-in default set to 'shareable'
for now). The default can be changed either via a daemon CLI option
(--default-shm-mode) or a daemon.json configuration file parameter
of the same name.
Note one can only set either 'shareable' or 'private' IPC modes as a
daemon default (i.e. in this context 'host', 'container', or 'none'
do not make much sense).
Some other changes this patch introduces are:
1. A mount for /dev/shm is added to default OCI Linux spec.
2. IpcMode.Valid() is simplified to remove duplicated code that parsed
'container:ID' form. Note the old version used to check that ID does
not contain a semicolon -- this is no longer the case (tests are
modified accordingly). The motivation is we should either do a
proper check for container ID validity, or don't check it at all
(since it is checked in other places anyway). I chose the latter.
3. IpcMode.Container() is modified to not return container ID if the
mode value does not start with "container:", unifying the check to
be the same as in IpcMode.IsContainer().
3. IPC mode unit tests (runconfig/hostconfig_test.go) are modified
to add checks for newly added values.
[v2: addressed review at https://github.com/moby/moby/pull/34087#pullrequestreview-51345997]
[v3: addressed review at https://github.com/moby/moby/pull/34087#pullrequestreview-53902833]
[v4: addressed the case of upgrading from older daemon, in this case
container.HostConfig.IpcMode is unset and this is valid]
[v5: document old and new IpcMode values in api/swagger.yaml]
[v6: add the 'none' mode, changelog entry to docs/api/version-history.md]
Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
2017-06-27 17:58:50 -04:00
|
|
|
})
|
2015-11-12 14:55:17 -05:00
|
|
|
|
2016-03-18 14:50:19 -04:00
|
|
|
return mounts
|
2015-12-28 06:19:26 -05:00
|
|
|
}
|
|
|
|
|
2017-04-28 14:48:52 -04:00
|
|
|
// SecretMounts returns the mounts for the secret path.
|
2017-12-18 16:02:23 -05:00
|
|
|
func (container *Container) SecretMounts() ([]Mount, error) {
|
2017-04-11 13:34:19 -04:00
|
|
|
var mounts []Mount
|
|
|
|
for _, r := range container.SecretReferences {
|
2017-04-28 14:48:52 -04:00
|
|
|
if r.File == nil {
|
|
|
|
continue
|
|
|
|
}
|
2017-12-18 16:02:23 -05:00
|
|
|
src, err := container.SecretFilePath(*r)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-04-11 13:34:19 -04:00
|
|
|
mounts = append(mounts, Mount{
|
2017-12-18 16:02:23 -05:00
|
|
|
Source: src,
|
2017-04-11 13:34:19 -04:00
|
|
|
Destination: getSecretTargetPath(r),
|
2016-10-19 12:22:02 -04:00
|
|
|
Writable: false,
|
2017-04-11 13:34:19 -04:00
|
|
|
})
|
2016-10-19 12:22:02 -04:00
|
|
|
}
|
2017-06-26 21:46:30 -04:00
|
|
|
for _, r := range container.ConfigReferences {
|
2018-01-17 10:49:58 -05:00
|
|
|
fPath, err := container.ConfigFilePath(*r)
|
2017-06-26 21:46:30 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
mounts = append(mounts, Mount{
|
|
|
|
Source: fPath,
|
2021-05-01 15:41:34 -04:00
|
|
|
Destination: getConfigTargetPath(r),
|
2017-06-26 21:46:30 -04:00
|
|
|
Writable: false,
|
|
|
|
})
|
|
|
|
}
|
2016-10-19 12:22:02 -04:00
|
|
|
|
2017-12-18 16:02:23 -05:00
|
|
|
return mounts, nil
|
2016-10-19 12:22:02 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// UnmountSecrets unmounts the local tmpfs for secrets
|
|
|
|
func (container *Container) UnmountSecrets() error {
|
2017-12-18 16:02:23 -05:00
|
|
|
p, err := container.SecretMountPath()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if _, err := os.Stat(p); err != nil {
|
2016-10-28 20:30:22 -04:00
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return nil
|
|
|
|
}
|
2016-10-31 11:53:43 -04:00
|
|
|
return err
|
2016-10-28 20:30:22 -04:00
|
|
|
}
|
|
|
|
|
2017-12-18 16:02:23 -05:00
|
|
|
return mount.RecursiveUnmount(p)
|
2016-10-19 12:22:02 -04:00
|
|
|
}
|
|
|
|
|
2017-07-19 10:20:13 -04:00
|
|
|
// conflictingUpdateOptions is an error returned when two container-update
// options cannot be applied together (e.g. NanoCPUs vs CPUPeriod).
type conflictingUpdateOptions string

// Error returns the message carried by the error value.
func (e conflictingUpdateOptions) Error() string { return string(e) }

// Conflict is a marker method; presumably consumed by the daemon's
// error-classification layer to surface this as a conflict — verify.
func (e conflictingUpdateOptions) Conflict() {}
|
|
|
|
|
2017-02-22 17:02:20 -05:00
|
|
|
// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container.
func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error {
	// update resources of container
	resources := hostConfig.Resources
	cResources := &container.HostConfig.Resources

	// validate NanoCPUs, CPUPeriod, and CPUQuota
	// Because NanoCPU effectively updates CPUPeriod/CPUQuota,
	// once NanoCPU is already set, updating CPUPeriod/CPUQuota will be blocked, and vice versa.
	// In the following we make sure the intended update (resources) does not conflict with the existing (cResource).
	if resources.NanoCPUs > 0 && cResources.CPUPeriod > 0 {
		return conflictingUpdateOptions("Conflicting options: Nano CPUs cannot be updated as CPU Period has already been set")
	}
	if resources.NanoCPUs > 0 && cResources.CPUQuota > 0 {
		return conflictingUpdateOptions("Conflicting options: Nano CPUs cannot be updated as CPU Quota has already been set")
	}
	if resources.CPUPeriod > 0 && cResources.NanoCPUs > 0 {
		return conflictingUpdateOptions("Conflicting options: CPU Period cannot be updated as NanoCPUs has already been set")
	}
	if resources.CPUQuota > 0 && cResources.NanoCPUs > 0 {
		return conflictingUpdateOptions("Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set")
	}

	// For each resource below, the zero value (or empty string) means
	// "not requested" and leaves the current setting unchanged.
	if resources.BlkioWeight != 0 {
		cResources.BlkioWeight = resources.BlkioWeight
	}
	if resources.CPUShares != 0 {
		cResources.CPUShares = resources.CPUShares
	}
	if resources.NanoCPUs != 0 {
		cResources.NanoCPUs = resources.NanoCPUs
	}
	if resources.CPUPeriod != 0 {
		cResources.CPUPeriod = resources.CPUPeriod
	}
	if resources.CPUQuota != 0 {
		cResources.CPUQuota = resources.CPUQuota
	}
	if resources.CpusetCpus != "" {
		cResources.CpusetCpus = resources.CpusetCpus
	}
	if resources.CpusetMems != "" {
		cResources.CpusetMems = resources.CpusetMems
	}
	if resources.Memory != 0 {
		// if memory limit smaller than already set memoryswap limit and doesn't
		// update the memoryswap limit, then error out.
		if resources.Memory > cResources.MemorySwap && resources.MemorySwap == 0 {
			return conflictingUpdateOptions("Memory limit should be smaller than already set memoryswap limit, update the memoryswap at the same time")
		}
		cResources.Memory = resources.Memory
	}
	if resources.MemorySwap != 0 {
		cResources.MemorySwap = resources.MemorySwap
	}
	if resources.MemoryReservation != 0 {
		cResources.MemoryReservation = resources.MemoryReservation
	}
	if resources.KernelMemory != 0 {
		cResources.KernelMemory = resources.KernelMemory
	}
	if resources.CPURealtimePeriod != 0 {
		cResources.CPURealtimePeriod = resources.CPURealtimePeriod
	}
	if resources.CPURealtimeRuntime != 0 {
		cResources.CPURealtimeRuntime = resources.CPURealtimeRuntime
	}
	// PidsLimit is a pointer, so nil (rather than zero) distinguishes
	// "not requested" from an explicit value.
	if resources.PidsLimit != nil {
		cResources.PidsLimit = resources.PidsLimit
	}

	// update HostConfig of container
	if hostConfig.RestartPolicy.Name != "" {
		// A restart policy other than "no" cannot coexist with AutoRemove:
		// an auto-removed container is gone before it could be restarted.
		if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
			return conflictingUpdateOptions("Restart policy cannot be updated because AutoRemove is enabled for the container")
		}
		container.HostConfig.RestartPolicy = hostConfig.RestartPolicy
	}

	return nil
}
|
|
|
|
|
2016-10-03 13:53:06 -04:00
|
|
|
// DetachAndUnmount uses a detached mount on all mount destinations, then
|
|
|
|
// unmounts each volume normally.
|
|
|
|
// This is used from daemon/archive for `docker cp`
|
|
|
|
func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error {
|
|
|
|
networkMounts := container.NetworkMounts()
|
|
|
|
mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts))
|
2015-11-12 14:55:17 -05:00
|
|
|
|
|
|
|
for _, mntPoint := range container.MountPoints {
|
|
|
|
dest, err := container.GetResourcePath(mntPoint.Destination)
|
|
|
|
if err != nil {
|
2016-10-03 13:53:06 -04:00
|
|
|
logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err)
|
|
|
|
continue
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
2016-10-03 13:53:06 -04:00
|
|
|
mountPaths = append(mountPaths, dest)
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
|
|
|
|
2016-10-03 13:53:06 -04:00
|
|
|
for _, m := range networkMounts {
|
|
|
|
dest, err := container.GetResourcePath(m.Destination)
|
|
|
|
if err != nil {
|
|
|
|
logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err)
|
|
|
|
continue
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
2016-10-03 13:53:06 -04:00
|
|
|
mountPaths = append(mountPaths, dest)
|
|
|
|
}
|
2015-11-12 14:55:17 -05:00
|
|
|
|
2016-10-03 13:53:06 -04:00
|
|
|
for _, mountPath := range mountPaths {
|
2018-05-10 16:47:12 -04:00
|
|
|
if err := mount.Unmount(mountPath); err != nil {
|
2018-10-22 21:30:34 -04:00
|
|
|
logrus.WithError(err).WithField("container", container.ID).
|
|
|
|
Warn("Unable to unmount")
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
|
|
|
}
|
2016-10-03 13:53:06 -04:00
|
|
|
return container.UnmountVolumes(volumeEventLog)
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
|
|
|
|
2018-12-04 01:07:06 -05:00
|
|
|
// ignoreUnsupportedXAttrs ignores errors when extended attributes
|
|
|
|
// are not supported
|
|
|
|
func ignoreUnsupportedXAttrs() fs.CopyDirOpt {
|
|
|
|
xeh := func(dst, src, xattrKey string, err error) error {
|
2020-04-17 06:01:01 -04:00
|
|
|
if !errors.Is(err, syscall.ENOTSUP) {
|
2018-12-04 01:07:06 -05:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return fs.WithXAttrErrorHandler(xeh)
|
|
|
|
}
|
|
|
|
|
2015-11-12 14:55:17 -05:00
|
|
|
// copyExistingContents copies from the source to the destination and
|
|
|
|
// ensures the ownership is appropriately set.
|
|
|
|
func copyExistingContents(source, destination string) error {
|
2021-08-24 06:10:50 -04:00
|
|
|
dstList, err := os.ReadDir(destination)
|
2015-11-12 14:55:17 -05:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-02-12 15:27:34 -05:00
|
|
|
if len(dstList) != 0 {
|
|
|
|
// destination is not empty, do not copy
|
|
|
|
return nil
|
2017-06-15 10:27:06 -04:00
|
|
|
}
|
2018-12-04 01:07:06 -05:00
|
|
|
return fs.CopyDir(destination, source, ignoreUnsupportedXAttrs())
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// TmpfsMounts returns the list of tmpfs mounts
|
2016-09-22 16:14:15 -04:00
|
|
|
func (container *Container) TmpfsMounts() ([]Mount, error) {
|
2016-03-18 14:50:19 -04:00
|
|
|
var mounts []Mount
|
2015-11-12 14:55:17 -05:00
|
|
|
for dest, data := range container.HostConfig.Tmpfs {
|
2016-03-18 14:50:19 -04:00
|
|
|
mounts = append(mounts, Mount{
|
2015-11-12 14:55:17 -05:00
|
|
|
Source: "tmpfs",
|
|
|
|
Destination: dest,
|
|
|
|
Data: data,
|
|
|
|
})
|
|
|
|
}
|
2021-06-11 15:01:18 -04:00
|
|
|
parser := volumemounts.NewParser()
|
2016-09-22 16:14:15 -04:00
|
|
|
for dest, mnt := range container.MountPoints {
|
|
|
|
if mnt.Type == mounttypes.TypeTmpfs {
|
2017-08-01 13:32:44 -04:00
|
|
|
data, err := parser.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly)
|
2016-09-22 16:14:15 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
mounts = append(mounts, Mount{
|
|
|
|
Source: "tmpfs",
|
|
|
|
Destination: dest,
|
|
|
|
Data: data,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return mounts, nil
|
2015-11-12 14:55:17 -05:00
|
|
|
}
|
2016-01-27 16:03:09 -05:00
|
|
|
|
2017-02-22 12:11:10 -05:00
|
|
|
// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock.
|
|
|
|
func (container *Container) GetMountPoints() []types.MountPoint {
|
|
|
|
mountPoints := make([]types.MountPoint, 0, len(container.MountPoints))
|
|
|
|
for _, m := range container.MountPoints {
|
|
|
|
mountPoints = append(mountPoints, types.MountPoint{
|
|
|
|
Type: m.Type,
|
|
|
|
Name: m.Name,
|
|
|
|
Source: m.Path(),
|
|
|
|
Destination: m.Destination,
|
|
|
|
Driver: m.Driver,
|
|
|
|
Mode: m.Mode,
|
|
|
|
RW: m.RW,
|
|
|
|
Propagation: m.Propagation,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return mountPoints
|
|
|
|
}
|
2018-01-17 10:49:58 -05:00
|
|
|
|
|
|
|
// ConfigFilePath returns the path to the on-disk location of a config.
|
|
|
|
// On unix, configs are always considered secret
|
|
|
|
func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) (string, error) {
|
|
|
|
mounts, err := container.SecretMountPath()
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
return filepath.Join(mounts, configRef.ConfigID), nil
|
|
|
|
}
|