mirror of https://github.com/moby/moby.git — synced 2022-11-09 12:21:53 -05:00
4bafaa00aa
The containerd client is very chatty at the best of times. Because the libcontainerd API is stateless and references containers and processes by string ID for every method call, the implementation is essentially forced to use the containerd client in a way which amplifies the number of redundant RPCs invoked to perform any operation. The libcontainerd remote implementation has to reload the containerd container, task and/or process metadata for nearly every operation. This in turn amplifies the number of context switches between dockerd and containerd to perform any container operation or handle a containerd event, increasing the load on the system which could otherwise be allocated to workloads.

Overhaul the libcontainerd interface to reduce the impedance mismatch with the containerd client so that the containerd client can be used more efficiently. Split the API out into container, task and process interfaces which the consumer is expected to retain, so that libcontainerd can retain state, especially the analogous containerd client objects, without having to manage any state store inside the libcontainerd client.

Signed-off-by: Cory Snider <csnider@mirantis.com>
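As a rough illustration of the split described above, the retained per-object handles might look like the sketch below. The package, interface and method names here are illustrative assumptions, not the actual libcontainerd API; real signatures carry more options and richer result types.

// Sketch only: a hypothetical shape for a stateful libcontainerd-style API in
// which the caller holds container, task and process handles instead of
// passing string IDs on every call. Names and signatures are illustrative.
package libcontainerdsketch

import "context"

// Container is a handle to a created container.
type Container interface {
	// Start creates the container's task (its init process) and returns a handle to it.
	Start(ctx context.Context) (Task, error)
	// Delete removes the container from containerd.
	Delete(ctx context.Context) error
}

// Task is a handle to a container's init process.
type Task interface {
	Process
	// Exec starts an additional process inside the running container.
	Exec(ctx context.Context, id string) (Process, error)
	Pause(ctx context.Context) error
	Resume(ctx context.Context) error
}

// Process is a handle to a single process, either init or exec'd.
type Process interface {
	Pid() uint32
	Kill(ctx context.Context, signal int) error
	// Wait blocks until the process exits and returns its exit status.
	Wait(ctx context.Context) (uint32, error)
}

Because the daemon keeps these handles alive, it can also keep the corresponding containerd client objects alive, so container, task and process metadata does not have to be re-fetched over RPC just to resolve a string ID before every operation.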
54 lines · 1.7 KiB · Go
package daemon // import "github.com/docker/docker/daemon"

import (
	"context"

	"github.com/containerd/containerd/pkg/apparmor"
	"github.com/docker/docker/container"
	"github.com/docker/docker/oci/caps"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func (daemon *Daemon) execSetPlatformOpt(ctx context.Context, ec *container.ExecConfig, p *specs.Process) error {
	if len(ec.User) > 0 {
		var err error
		p.User, err = getUser(ec.Container, ec.User)
		if err != nil {
			return err
		}
	}
	if ec.Privileged {
		p.Capabilities = &specs.LinuxCapabilities{
			Bounding:  caps.GetAllCapabilities(),
			Permitted: caps.GetAllCapabilities(),
			Effective: caps.GetAllCapabilities(),
		}
	}
	if apparmor.HostSupports() {
		var appArmorProfile string
		if ec.Container.AppArmorProfile != "" {
			appArmorProfile = ec.Container.AppArmorProfile
		} else if ec.Container.HostConfig.Privileged {
			// `docker exec --privileged` does not currently disable AppArmor
			// profiles. Privileged configuration of the container is inherited
			appArmorProfile = unconfinedAppArmorProfile
		} else {
			appArmorProfile = defaultAppArmorProfile
		}

		if appArmorProfile == defaultAppArmorProfile {
			// Unattended upgrades and other fun services can unload AppArmor
			// profiles inadvertently. Since we cannot store our profile in
			// /etc/apparmor.d, nor can we practically add other ways of
			// telling the system to keep our profile loaded, in order to make
			// sure that we keep the default profile enabled we dynamically
			// reload it if necessary.
			if err := ensureDefaultAppArmorProfile(); err != nil {
				return err
			}
		}
		p.ApparmorProfile = appArmorProfile
	}
	s := &specs.Spec{Process: p}
	return WithRlimits(daemon, ec.Container)(ctx, nil, nil, s)
}