
Move --rm to daemon side

`--rm` is a client-side flag, which caused several problems:
1. If the client loses its connection to the daemon (for example, the client crashes or is killed), there is no way to clean up the garbage container.
2. If a `--rm` container is stopped with `docker stop`, it is not auto-removed.
3. If the daemon restarts, the container is also left over.
4. Bug: `docker run --rm busybox fakecmd` exits without any cleanup.

In short, a client-side `--rm` flag isn't sufficient for garbage collection. Moving `--rm` to the daemon side is more reasonable.

What this commit does:
1. Implements `--rm` on the daemon side by adding an `AutoRemove` flag to HostConfig.
2. Allows `docker run --rm -d`: `--rm` and `-d` no longer conflict, so auto-remove works in detached mode.
3. Makes `docker restart` of a `--rm` container succeed without the container being auto-removed.

This makes it much easier for the daemon to garbage-collect temporary containers. A client-side sketch of the new flag follows below.
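
As an illustration of the daemon-side flag from an API client's perspective, here is a minimal sketch using the engine-api packages the diff below already imports. Only the `HostConfig.AutoRemove` field comes from this change; the image, command, and use of `client.NewEnvClient` are illustrative assumptions, not part of the commit.

```go
package main

import (
	"fmt"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/container"
	"github.com/docker/engine-api/types/network"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()

	// Connect to the daemon via the usual DOCKER_HOST/DOCKER_API_VERSION env vars.
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// AutoRemove lives in HostConfig, so the daemon (not the CLI) owns cleanup:
	// the container is removed when it exits, even if this client disconnects.
	resp, err := cli.ContainerCreate(ctx,
		&container.Config{
			Image: "busybox",                 // illustrative image
			Cmd:   []string{"echo", "hello"}, // illustrative command
		},
		&container.HostConfig{
			AutoRemove: true, // daemon-side equivalent of `docker run --rm`, also valid with -d
		},
		&network.NetworkingConfig{},
		"")
	if err != nil {
		panic(err)
	}

	// Start it detached; the daemon removes it once the process exits.
	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("started auto-remove container", resp.ID)
}
```

Because the flag is stored in HostConfig, the same cleanup path also covers the daemon-restart case handled in the `restore()` hunk further down.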

Signed-off-by: Zhang Wei <zhangwei555@huawei.com>
Committed by Zhang Wei on 2016-03-02 00:30:27 +08:00
commit 3c2886d8a4, parent 2684459ed4
9 changed files with 113 additions and 46 deletions


@@ -25,7 +25,6 @@ import (
)
type runOptions struct {
autoRemove bool
detach bool
sigProxy bool
name string
@@ -55,7 +54,6 @@ func NewRunCommand(dockerCli *client.DockerCli) *cobra.Command {
flags.SetInterspersed(false)
// These are flags not stored in Config/HostConfig
flags.BoolVar(&opts.autoRemove, "rm", false, "Automatically remove the container when it exits")
flags.BoolVarP(&opts.detach, "detach", "d", false, "Run container in background and print container ID")
flags.BoolVar(&opts.sigProxy, "sig-proxy", true, "Proxy received signals to the process")
flags.StringVar(&opts.name, "name", "", "Assign a name to the container")
@@ -87,7 +85,6 @@ func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions,
flAttach *opttypes.ListOpts
ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d")
ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm")
ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d")
)
config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts)
@@ -127,9 +124,6 @@ func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions,
return ErrConflictAttachDetach
}
}
if opts.autoRemove {
return ErrConflictDetachAutoRemove
}
config.AttachStdin = false
config.AttachStdout = false
@@ -172,7 +166,7 @@ func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions,
fmt.Fprintf(stdout, "%s\n", createResponse.ID)
}()
}
if opts.autoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) {
if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
return ErrConflictRestartPolicyAndAutoRemove
}
attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
@@ -225,16 +219,6 @@ func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions,
})
}
if opts.autoRemove {
defer func() {
// Explicitly not sharing the context as it could be "Done" (by calling cancelFun)
// and thus the container would not be removed.
if err := removeContainer(dockerCli, context.Background(), createResponse.ID, true, false, true); err != nil {
fmt.Fprintf(stderr, "%v\n", err)
}
}()
}
//start the container
if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil {
// If we have holdHijackedConnection, we should notify
@@ -272,30 +256,17 @@ func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions,
var status int
// Attached mode
if opts.autoRemove {
// Autoremove: wait for the container to finish, retrieve
// the exit code and remove the container
if status, err = client.ContainerWait(ctx, createResponse.ID); err != nil {
return runStartContainerErr(err)
}
if _, status, err = getExitCode(dockerCli, ctx, createResponse.ID); err != nil {
return err
}
} else {
// No Autoremove: Simply retrieve the exit code
if !config.Tty && hostConfig.RestartPolicy.IsNone() {
// In non-TTY mode, we can't detach, so we must wait for container exit
if status, err = client.ContainerWait(ctx, createResponse.ID); err != nil {
return err
}
} else {
// In TTY mode, there is a race: if the process dies too slowly, the state could
// be updated after the getExitCode call and result in the wrong exit code being reported
if _, status, err = getExitCode(dockerCli, ctx, createResponse.ID); err != nil {
return err
}
}
if !config.Tty {
// In non-TTY mode, we can't detach, so we must wait for container exit
client.ContainerWait(context.Background(), createResponse.ID)
}
// In TTY mode, there is a race: if the process dies too slowly, the state could
// be updated after the getExitCode call and result in the wrong exit code being reported
if _, status, err = dockerCli.GetExitCode(ctx, createResponse.ID); err != nil {
return fmt.Errorf("tty: status: %d; error: %v;", status, err)
}
if status != 0 {
return cli.StatusError{StatusCode: status}
}


@@ -238,6 +238,10 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostCon
return nil, nil
}
if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
return nil, fmt.Errorf("Can't create 'AutoRemove' container with restart policy")
}
for port := range hostConfig.PortBindings {
_, portStr := nat.SplitProtoPort(string(port))
if _, err := nat.ParsePort(portStr); err != nil {


@@ -147,6 +147,7 @@ func (daemon *Daemon) restore() error {
}
var migrateLegacyLinks bool
removeContainers := make(map[string]*container.Container)
restartContainers := make(map[*container.Container]chan struct{})
activeSandboxes := make(map[string]interface{})
for _, c := range containers {
@@ -194,10 +195,14 @@ func (daemon *Daemon) restore() error {
}
// fixme: only if not running
// get list of containers we need to restart
if daemon.configStore.AutoRestart && !c.IsRunning() && !c.IsPaused() && c.ShouldRestart() {
mapLock.Lock()
restartContainers[c] = make(chan struct{})
mapLock.Unlock()
if !c.IsRunning() && !c.IsPaused() {
if daemon.configStore.AutoRestart && c.ShouldRestart() {
mapLock.Lock()
restartContainers[c] = make(chan struct{})
mapLock.Unlock()
} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
removeContainers[c.ID] = c
}
}
if c.RemovalInProgress {
@@ -283,6 +288,18 @@ func (daemon *Daemon) restore() error {
}
group.Wait()
removeGroup := sync.WaitGroup{}
for id := range removeContainers {
removeGroup.Add(1)
go func(cid string) {
defer removeGroup.Done()
if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
logrus.Errorf("Failed to remove container %s: %s", cid, err)
}
}(id)
}
removeGroup.Wait()
// any containers that were started above would already have had this done,
// however we need to now prepare the mountpoints for the rest of the containers as well.
// This shouldn't cause any issue running on the containers that already had this run.
@@ -295,7 +312,11 @@ func (daemon *Daemon) restore() error {
// has a volume and the volume dirver is not available.
if _, ok := restartContainers[c]; ok {
continue
} else if _, ok := removeContainers[c.ID]; ok {
// container is automatically removed, skip it.
continue
}
group.Add(1)
go func(c *container.Container) {
defer group.Done()


@@ -3,6 +3,7 @@ package daemon
import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/container"
)
@@ -35,11 +36,25 @@ func (daemon *Daemon) containerRestart(container *container.Container, seconds i
defer daemon.Unmount(container)
}
if err := daemon.containerStop(container, seconds); err != nil {
// set AutoRemove flag to false before stop so the container won't be
// removed during restart process
autoRemove := container.HostConfig.AutoRemove
container.HostConfig.AutoRemove = false
err := daemon.containerStop(container, seconds)
// restore AutoRemove irrespective of whether the stop worked or not
container.HostConfig.AutoRemove = autoRemove
// containerStop will write HostConfig to disk, we shall restore AutoRemove
// in disk too
if toDiskErr := container.ToDiskLocking(); toDiskErr != nil {
logrus.Errorf("Write container to disk error: %v", toDiskErr)
}
if err != nil {
return err
}
if err := daemon.containerStart(container); err != nil {
if err = daemon.containerStart(container); err != nil {
return err
}


@@ -14,6 +14,7 @@ import (
"github.com/docker/docker/errors"
"github.com/docker/docker/libcontainerd"
"github.com/docker/docker/runconfig"
"github.com/docker/engine-api/types"
containertypes "github.com/docker/engine-api/types/container"
)
@@ -197,4 +198,10 @@ func (daemon *Daemon) Cleanup(container *container.Container) {
}
}
container.CancelAttachContext()
// if containers AutoRemove flag is set, remove it after clean up
if container.HostConfig.AutoRemove {
// If containers lock is not released, goroutine will guarantee no block
go daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true})
}
}


@@ -2742,3 +2742,28 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) {
out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls")
c.Assert(err, check.IsNil, check.Commentf(out))
}
func (s *DockerDaemonSuite) TestDaemonRestartWithAutoRemoveContainer(c *check.C) {
err := s.d.StartWithBusybox()
c.Assert(err, checker.IsNil)
// top1 will exist after daemon restarts
out, err := s.d.Cmd("run", "-d", "--name", "top1", "busybox:latest", "top")
c.Assert(err, checker.IsNil, check.Commentf("run top1: %v", out))
// top2 will be removed after daemon restarts
out, err = s.d.Cmd("run", "-d", "--rm", "--name", "top2", "busybox:latest", "top")
c.Assert(err, checker.IsNil, check.Commentf("run top2: %v", out))
out, err = s.d.Cmd("ps")
c.Assert(out, checker.Contains, "top1", check.Commentf("top1 should be running"))
c.Assert(out, checker.Contains, "top2", check.Commentf("top2 should be running"))
// now restart daemon gracefully
err = s.d.Restart()
c.Assert(err, checker.IsNil)
out, err = s.d.Cmd("ps", "-a")
c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
c.Assert(out, checker.Contains, "top1", check.Commentf("top1 should exist after daemon restarts"))
c.Assert(out, checker.Not(checker.Contains), "top2", check.Commentf("top2 should be removed after daemon restarts"))
}


@@ -240,3 +240,15 @@ func (s *DockerSuite) TestRestartContainerwithRestartPolicy(c *check.C) {
dockerCmd(c, "start", id1)
dockerCmd(c, "start", id2)
}
func (s *DockerSuite) TestRestartAutoRmoveContainer(c *check.C) {
out, _ := runSleepingContainer(c, "--rm")
id := strings.TrimSpace(string(out))
dockerCmd(c, "restart", id)
err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second)
c.Assert(err, checker.IsNil)
out, _ = dockerCmd(c, "ps")
c.Assert(out, checker.Contains, id[:12], check.Commentf("container should be restarted instead of removed: %v", out))
}


@@ -4498,6 +4498,15 @@ func (s *DockerSuite) TestRunAddHostInHostMode(c *check.C) {
c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out))
}
func (s *DockerSuite) TestRunRmAndWait(c *check.C) {
dockerCmd(c, "run", "--name=test", "--rm", "-d", "busybox", "sh", "-c", "sleep 3;exit 2")
out, code, err := dockerCmdWithError("wait", "test")
c.Assert(err, checker.IsNil, check.Commentf("out: %s; exit code: %d", out, code))
c.Assert(out, checker.Equals, "2\n", check.Commentf("exit code: %d", code))
c.Assert(code, checker.Equals, 0)
}
// Test case for #23498
func (s *DockerSuite) TestRunUnsetEntrypoint(c *check.C) {
testRequires(c, DaemonIsLinux)


@@ -103,6 +103,7 @@ type ContainerOptions struct {
flHealthTimeout time.Duration
flHealthRetries int
flRuntime string
flAutoRemove bool
Image string
Args []string
@@ -163,6 +164,7 @@ func AddFlags(flags *pflag.FlagSet) *ContainerOptions {
flags.Var(copts.flUlimits, "ulimit", "Ulimit options")
flags.StringVarP(&copts.flUser, "user", "u", "", "Username or UID (format: <name|uid>[:<group|gid>])")
flags.StringVarP(&copts.flWorkingDir, "workdir", "w", "", "Working directory inside the container")
flags.BoolVar(&copts.flAutoRemove, "rm", false, "Automatically remove the container when it exits")
// Security
flags.Var(&copts.flCapAdd, "cap-add", "Add Linux capabilities")
@@ -553,6 +555,7 @@ func Parse(flags *pflag.FlagSet, copts *ContainerOptions) (*container.Config, *c
Binds: binds,
ContainerIDFile: copts.flContainerIDFile,
OomScoreAdj: copts.flOomScoreAdj,
AutoRemove: copts.flAutoRemove,
Privileged: copts.flPrivileged,
PortBindings: portBindings,
Links: copts.flLinks.GetAll(),