Merge pull request #20848 from WeiZhang555/move-rm-to-daemon

Move --rm to daemon side

Commit 31e3970394 · 15 changed files with 227 additions and 52 deletions
@@ -8,6 +8,7 @@ import (
 	"runtime"
 	"strings"
 	"syscall"
+	"time"

 	"golang.org/x/net/context"

@@ -25,7 +26,6 @@ import (
 )

 type runOptions struct {
-	autoRemove bool
 	detach     bool
 	sigProxy   bool
 	name       string
@@ -55,7 +55,6 @@ func NewRunCommand(dockerCli *client.DockerCli) *cobra.Command {
 	flags.SetInterspersed(false)

 	// These are flags not stored in Config/HostConfig
-	flags.BoolVar(&opts.autoRemove, "rm", false, "Automatically remove the container when it exits")
 	flags.BoolVarP(&opts.detach, "detach", "d", false, "Run container in background and print container ID")
 	flags.BoolVar(&opts.sigProxy, "sig-proxy", true, "Proxy received signals to the process")
 	flags.StringVar(&opts.name, "name", "", "Assign a name to the container")
@@ -87,7 +86,6 @@ func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions,
 		flAttach                              *opttypes.ListOpts
 		ErrConflictAttachDetach               = fmt.Errorf("Conflicting options: -a and -d")
 		ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm")
-		ErrConflictDetachAutoRemove           = fmt.Errorf("Conflicting options: --rm and -d")
 	)

 	config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts)
@@ -127,9 +125,6 @@ func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions,
 			return ErrConflictAttachDetach
 		}
 	}
-	if opts.autoRemove {
-		return ErrConflictDetachAutoRemove
-	}

 	config.AttachStdin = false
 	config.AttachStdout = false
@@ -149,6 +144,7 @@ func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions,
 		hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.GetTtySize()
 	}

+	startTime := time.Now()
 	ctx, cancelFun := context.WithCancel(context.Background())

 	createResponse, err := createContainer(ctx, dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name)
@@ -172,7 +168,7 @@ func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions,
 			fmt.Fprintf(stdout, "%s\n", createResponse.ID)
 		}()
 	}
-	if opts.autoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) {
+	if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
 		return ErrConflictRestartPolicyAndAutoRemove
 	}
 	attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
@@ -225,16 +221,6 @@ func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions,
 		})
 	}

-	if opts.autoRemove {
-		defer func() {
-			// Explicitly not sharing the context as it could be "Done" (by calling cancelFun)
-			// and thus the container would not be removed.
-			if err := removeContainer(dockerCli, context.Background(), createResponse.ID, true, false, true); err != nil {
-				fmt.Fprintf(stderr, "%v\n", err)
-			}
-		}()
-	}
-
 	//start the container
 	if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil {
 		// If we have holdHijackedConnection, we should notify
@@ -246,6 +232,11 @@ func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions,
 		}

 		reportError(stderr, cmdPath, err.Error(), false)
+		if hostConfig.AutoRemove {
+			if _, errWait := waitExitOrRemoved(dockerCli, context.Background(), createResponse.ID, hostConfig.AutoRemove, startTime); errWait != nil {
+				logrus.Debugf("Error waiting container's removal: %v", errWait)
+			}
+		}
 		return runStartContainerErr(err)
 	}

@@ -272,30 +263,11 @@ func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions,
 	var status int

 	// Attached mode
-	if opts.autoRemove {
-		// Autoremove: wait for the container to finish, retrieve
-		// the exit code and remove the container
-		if status, err = client.ContainerWait(ctx, createResponse.ID); err != nil {
-			return runStartContainerErr(err)
-		}
-		if _, status, err = getExitCode(dockerCli, ctx, createResponse.ID); err != nil {
-			return err
-		}
-	} else {
-		// No Autoremove: Simply retrieve the exit code
-		if !config.Tty && hostConfig.RestartPolicy.IsNone() {
-			// In non-TTY mode, we can't detach, so we must wait for container exit
-			if status, err = client.ContainerWait(ctx, createResponse.ID); err != nil {
-				return err
-			}
-		} else {
-			// In TTY mode, there is a race: if the process dies too slowly, the state could
-			// be updated after the getExitCode call and result in the wrong exit code being reported
-			if _, status, err = getExitCode(dockerCli, ctx, createResponse.ID); err != nil {
-				return err
-			}
-		}
-	}
+	status, err = waitExitOrRemoved(dockerCli, ctx, createResponse.ID, hostConfig.AutoRemove, startTime)
+	if err != nil {
+		return fmt.Errorf("Error waiting container to exit: %v", err)
+	}

 	if status != 0 {
 		return cli.StatusError{StatusCode: status}
 	}
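Taken together, the client hunks above delete two bespoke wait paths and drop the old `--rm`/`-d` conflict: after this change, `--rm` only conflicts with a restart policy, which the daemon enforces as well. A minimal, runnable sketch of that surviving validation rule, using toy types rather than the real engine-api structs:

```go
package main

import (
	"errors"
	"fmt"
)

// RestartPolicy loosely mirrors engine-api's container.RestartPolicy;
// an empty name (or "no") means no restart policy is set.
type RestartPolicy struct{ Name string }

func (p RestartPolicy) IsNone() bool { return p.Name == "" || p.Name == "no" }

var errConflictRestartPolicyAndAutoRemove = errors.New("Conflicting options: --restart and --rm")

// validate reproduces the new rule: AutoRemove is allowed on its own
// (including with -d), but not together with any restart policy.
func validate(autoRemove bool, policy RestartPolicy) error {
	if autoRemove && !policy.IsNone() {
		return errConflictRestartPolicyAndAutoRemove
	}
	return nil
}

func main() {
	fmt.Println(validate(true, RestartPolicy{}))               // <nil>: --rm alone is fine
	fmt.Println(validate(true, RestartPolicy{Name: "always"})) // conflict error
}
```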
@@ -1,12 +1,100 @@
 package container

 import (
 	"fmt"
+	"strconv"
+	"time"

 	"golang.org/x/net/context"

+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api/client"
+	"github.com/docker/docker/api/client/system"
 	clientapi "github.com/docker/engine-api/client"
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/events"
+	"github.com/docker/engine-api/types/filters"
 )

+func waitExitOrRemoved(dockerCli *client.DockerCli, ctx context.Context, containerID string, waitRemove bool, since time.Time) (int, error) {
+	if len(containerID) == 0 {
+		// containerID can never be empty
+		panic("Internal Error: waitExitOrRemoved needs a containerID as parameter")
+	}
+
+	var exitCode int
+	exitChan := make(chan struct{})
+	detachChan := make(chan struct{})
+	destroyChan := make(chan struct{})
+
+	// Start watch events
+	eh := system.InitEventHandler()
+	eh.Handle("die", func(e events.Message) {
+		if len(e.Actor.Attributes) > 0 {
+			for k, v := range e.Actor.Attributes {
+				if k == "exitCode" {
+					var err error
+					if exitCode, err = strconv.Atoi(v); err != nil {
+						logrus.Errorf("Can't convert %q to int: %v", v, err)
+					}
+					close(exitChan)
+					break
+				}
+			}
+		}
+	})
+
+	eh.Handle("detach", func(e events.Message) {
+		exitCode = 0
+		close(detachChan)
+	})
+	eh.Handle("destroy", func(e events.Message) {
+		close(destroyChan)
+	})
+
+	eventChan := make(chan events.Message)
+	go eh.Watch(eventChan)
+	defer close(eventChan)
+
+	// Get events via Events API
+	f := filters.NewArgs()
+	f.Add("type", "container")
+	f.Add("container", containerID)
+	options := types.EventsOptions{
+		Since:   fmt.Sprintf("%d", since.Unix()),
+		Filters: f,
+	}
+	resBody, err := dockerCli.Client().Events(ctx, options)
+	if err != nil {
+		return -1, fmt.Errorf("can't get events from daemon: %v", err)
+	}
+	defer resBody.Close()
+
+	go system.DecodeEvents(resBody, func(event events.Message, err error) error {
+		if err != nil {
+			return nil
+		}
+		eventChan <- event
+		return nil
+	})
+
+	if waitRemove {
+		select {
+		case <-destroyChan:
+			return exitCode, nil
+		case <-detachChan:
+			return 0, nil
+		}
+	} else {
+		select {
+		case <-exitChan:
+			return exitCode, nil
+		case <-detachChan:
+			return 0, nil
+		}
+	}
+}
+
 // getExitCode performs an inspect on the container. It returns
 // the running state and the exit code.
 func getExitCode(dockerCli *client.DockerCli, ctx context.Context, containerID string) (bool, int, error) {
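The new helper is essentially an event fan-in: each handler closes one channel per terminal condition ("die", "detach", "destroy"), and a `select` waits for whichever fires first. With `waitRemove` set, it holds out for the `destroy` event, so the exit code captured at "die" is only reported once the daemon has actually finished removing the container. A self-contained sketch of the same pattern using only the standard library (the event struct and helper names here are stand-ins, not the real engine-api types):

```go
package main

import (
	"fmt"
	"time"
)

// event is a stand-in for engine-api's events.Message.
type event struct {
	action   string
	exitCode int
}

// wait fans events into per-condition channels, then selects on them.
// The real helper also handles a "detach" event; omitted here for brevity.
func wait(events <-chan event, waitRemove bool) int {
	var exitCode int
	exitChan := make(chan struct{})
	destroyChan := make(chan struct{})

	go func() {
		for e := range events {
			switch e.action {
			case "die": // container process exited; remember its code
				exitCode = e.exitCode
				close(exitChan)
			case "destroy": // container record removed by the daemon
				close(destroyChan)
			}
		}
	}()

	if waitRemove {
		<-destroyChan // with --rm, only report once removal is done
		return exitCode
	}
	<-exitChan
	return exitCode
}

func main() {
	ch := make(chan event, 2)
	go func() {
		time.Sleep(10 * time.Millisecond)
		ch <- event{action: "die", exitCode: 2}
		ch <- event{action: "destroy"}
	}()
	fmt.Println(wait(ch, true)) // prints 2
}
```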
@@ -210,7 +210,7 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostCon
 	if config.WorkingDir != "" {
 		config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics
 		if !system.IsAbs(config.WorkingDir) {
-			return nil, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path", config.WorkingDir)
+			return nil, fmt.Errorf("the working directory '%s' is invalid, it needs to be an absolute path", config.WorkingDir)
 		}
 	}

@@ -238,15 +238,19 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostCon
 		return nil, nil
 	}

+	if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
+		return nil, fmt.Errorf("can't create 'AutoRemove' container with restart policy")
+	}
+
 	for port := range hostConfig.PortBindings {
 		_, portStr := nat.SplitProtoPort(string(port))
 		if _, err := nat.ParsePort(portStr); err != nil {
-			return nil, fmt.Errorf("Invalid port specification: %q", portStr)
+			return nil, fmt.Errorf("invalid port specification: %q", portStr)
 		}
 		for _, pb := range hostConfig.PortBindings[port] {
 			_, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort))
 			if err != nil {
-				return nil, fmt.Errorf("Invalid port specification: %q", pb.HostPort)
+				return nil, fmt.Errorf("invalid port specification: %q", pb.HostPort)
 			}
 		}
 	}
@@ -147,6 +147,7 @@ func (daemon *Daemon) restore() error {
 	}

 	var migrateLegacyLinks bool
+	removeContainers := make(map[string]*container.Container)
 	restartContainers := make(map[*container.Container]chan struct{})
 	activeSandboxes := make(map[string]interface{})
 	for _, c := range containers {
@@ -194,10 +195,14 @@ func (daemon *Daemon) restore() error {
 		}
 		// fixme: only if not running
 		// get list of containers we need to restart
-		if daemon.configStore.AutoRestart && !c.IsRunning() && !c.IsPaused() && c.ShouldRestart() {
-			mapLock.Lock()
-			restartContainers[c] = make(chan struct{})
-			mapLock.Unlock()
+		if !c.IsRunning() && !c.IsPaused() {
+			if daemon.configStore.AutoRestart && c.ShouldRestart() {
+				mapLock.Lock()
+				restartContainers[c] = make(chan struct{})
+				mapLock.Unlock()
+			} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
+				removeContainers[c.ID] = c
+			}
 		}

 		if c.RemovalInProgress {
@@ -283,6 +288,18 @@ func (daemon *Daemon) restore() error {
 	}
 	group.Wait()

+	removeGroup := sync.WaitGroup{}
+	for id := range removeContainers {
+		removeGroup.Add(1)
+		go func(cid string) {
+			if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
+				logrus.Errorf("Failed to remove container %s: %s", cid, err)
+			}
+			removeGroup.Done()
+		}(id)
+	}
+	removeGroup.Wait()
+
 	// any containers that were started above would already have had this done,
 	// however we need to now prepare the mountpoints for the rest of the containers as well.
 	// This shouldn't cause any issue running on the containers that already had this run.
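The `removeGroup` block above is a standard WaitGroup fan-out: one goroutine per container flagged for auto-removal, with `Wait()` blocking daemon startup until every removal attempt has finished. A runnable reduction of the same pattern (the `remove` callback stands in for `daemon.ContainerRm`):

```go
package main

import (
	"fmt"
	"sync"
)

// removeAll mirrors the removeGroup logic: launch one goroutine per
// container ID, log failures, and block until all attempts complete.
func removeAll(ids []string, remove func(string) error) {
	var wg sync.WaitGroup
	for _, id := range ids {
		wg.Add(1)
		go func(cid string) {
			defer wg.Done()
			if err := remove(cid); err != nil {
				fmt.Printf("Failed to remove container %s: %s\n", cid, err)
			}
		}(id)
	}
	wg.Wait()
}

func main() {
	removeAll([]string{"abc123", "def456"}, func(cid string) error {
		fmt.Println("removing", cid)
		return nil // a real implementation would call into the daemon here
	})
}
```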
@@ -295,7 +312,11 @@ func (daemon *Daemon) restore() error {
 		// has a volume and the volume dirver is not available.
 		if _, ok := restartContainers[c]; ok {
 			continue
+		} else if _, ok := removeContainers[c.ID]; ok {
+			// container is automatically removed, skip it.
+			continue
 		}

 		group.Add(1)
 		go func(c *container.Container) {
 			defer group.Done()
@@ -11,6 +11,7 @@ import (
 	"github.com/docker/docker/daemon/exec"
 	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/runconfig"
+	"github.com/docker/engine-api/types"
 )

 // StateChanged updates daemon state changes from containerd
@@ -29,6 +30,14 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		daemon.updateHealthMonitor(c)
 		daemon.LogContainerEvent(c, "oom")
 	case libcontainerd.StateExit:
+		// if containers AutoRemove flag is set, remove it after clean up
+		if c.HostConfig.AutoRemove {
+			defer func() {
+				if err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
+					logrus.Errorf("can't remove container %s: %v", c.ID, err)
+				}
+			}()
+		}
 		c.Lock()
 		defer c.Unlock()
 		c.Wait()
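Note why the removal in the `StateExit` branch is registered with `defer` rather than called inline: defers run last-in-first-out when the handler returns, so the removal fires only after the exit state has been recorded and the later `defer c.Unlock()` has released the container lock. A tiny demo of that ordering:

```go
package main

import "fmt"

func handleExit() {
	// Deferred first, so it runs last: after state is recorded and the
	// (later-deferred) unlock has run, like the StateExit branch above.
	defer fmt.Println("3: remove container")
	fmt.Println("1: record exit state")
	defer fmt.Println("2: unlock container")
}

func main() { handleExit() } // prints 1, then 2, then 3
```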
@@ -3,6 +3,7 @@ package daemon
 import (
 	"fmt"

+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/container"
 )

@@ -35,11 +36,25 @@ func (daemon *Daemon) containerRestart(container *container.Container, seconds i
 		defer daemon.Unmount(container)
 	}

-	if err := daemon.containerStop(container, seconds); err != nil {
+	// set AutoRemove flag to false before stop so the container won't be
+	// removed during restart process
+	autoRemove := container.HostConfig.AutoRemove
+
+	container.HostConfig.AutoRemove = false
+	err := daemon.containerStop(container, seconds)
+	// restore AutoRemove irrespective of whether the stop worked or not
+	container.HostConfig.AutoRemove = autoRemove
+	// containerStop will write HostConfig to disk, we shall restore AutoRemove
+	// in disk too
+	if toDiskErr := container.ToDiskLocking(); toDiskErr != nil {
+		logrus.Errorf("Write container to disk error: %v", toDiskErr)
+	}
+
+	if err != nil {
 		return err
 	}

-	if err := daemon.containerStart(container); err != nil {
+	if err = daemon.containerStart(container); err != nil {
 		return err
 	}

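The restart fix is a save/clear/restore dance: clear `AutoRemove` so the stop half of a restart cannot trigger removal, restore it whether or not the stop succeeded, and re-persist it because `containerStop` writes the temporarily falsified HostConfig to disk. A stripped-down, runnable sketch with a toy container type (the real code persists via `ToDiskLocking()`):

```go
package main

import "fmt"

type container struct{ AutoRemove bool }

func (c *container) stop() error {
	fmt.Println("stop sees AutoRemove =", c.AutoRemove) // false during restart
	return nil
}

// restart clears the flag around stop, then restores it unconditionally;
// the real code also rewrites the container's HostConfig on disk here.
func restart(c *container) error {
	autoRemove := c.AutoRemove
	c.AutoRemove = false
	err := c.stop()
	c.AutoRemove = autoRemove // restore irrespective of stop's outcome
	if err != nil {
		return err
	}
	fmt.Println("start sees AutoRemove =", c.AutoRemove) // true again
	return nil
}

func main() {
	_ = restart(&container{AutoRemove: true})
}
```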
@@ -14,6 +14,7 @@ import (
 	"github.com/docker/docker/errors"
 	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/runconfig"
+	"github.com/docker/engine-api/types"
 	containertypes "github.com/docker/engine-api/types/container"
 )

@@ -112,6 +113,14 @@ func (daemon *Daemon) containerStart(container *container.Container) (err error)
 		}
 		container.ToDisk()
 		daemon.Cleanup(container)
+		// if containers AutoRemove flag is set, remove it after clean up
+		if container.HostConfig.AutoRemove {
+			container.Unlock()
+			if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
+				logrus.Errorf("can't remove container %s: %v", container.ID, err)
+			}
+			container.Lock()
+		}
 	}
 }()

@@ -116,6 +116,8 @@ This section lists each version from latest to oldest. Each listing includes a

 [Docker Remote API v1.25](docker_remote_api_v1.25.md) documentation

+* `POST /containers/create` now takes `AutoRemove` in HostConfig, to enable auto-removal of the container on daemon side when the container's process exits.
+
 ### v1.24 API changes

 [Docker Remote API v1.24](docker_remote_api_v1.24.md) documentation
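For API consumers, the new field is set at create time. Below is a minimal sketch against the engine-api Go client of this era; the `ContainerCreate`/`ContainerStart` signatures drifted between releases, so treat the exact calls as illustrative rather than authoritative:

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/container"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// AutoRemove in HostConfig asks the daemon (API >= 1.25) to delete the
	// container once its process exits; no client-side cleanup is needed.
	resp, err := cli.ContainerCreate(ctx,
		&container.Config{Image: "busybox", Cmd: []string{"true"}},
		&container.HostConfig{AutoRemove: true},
		nil, "")
	if err != nil {
		panic(err)
	}

	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("started", resp.ID)
}
```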
@@ -325,6 +325,7 @@ Create a container
              "CapDrop": ["MKNOD"],
              "GroupAdd": ["newgroup"],
              "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+             "AutoRemove": true,
              "NetworkMode": "bridge",
              "Devices": [],
              "Ulimits": [{}],
@@ -458,6 +459,8 @@ Create a container
       The default is not to restart. (optional)
       An ever increasing delay (double the previous delay, starting at 100mS)
       is added before each restart to prevent flooding the server.
+- **AutoRemove** - Boolean value, set to `true` to automatically remove the container on daemon side
+  when the container's process exits.
 - **UsernsMode** - Sets the usernamespace mode for the container when usernamespace remapping option is enabled.
       supported values are: `host`.
 - **NetworkMode** - Sets the networking mode for the container. Supported
@@ -599,6 +602,7 @@ Return low-level information on the container `id`
             "MaximumRetryCount": 2,
             "Name": "on-failure"
         },
+        "AutoRemove": true,
         "LogConfig": {
             "Config": null,
             "Type": "json-file"
@@ -476,7 +476,7 @@ func (s *DockerSuite) TestContainerApiBadPort(c *check.C) {
 	status, body, err := sockRequest("POST", "/containers/create", config)
 	c.Assert(err, checker.IsNil)
 	c.Assert(status, checker.Equals, http.StatusInternalServerError)
-	c.Assert(getErrorMessage(c, body), checker.Equals, `Invalid port specification: "aa80"`, check.Commentf("Incorrect error msg: %s", body))
+	c.Assert(getErrorMessage(c, body), checker.Equals, `invalid port specification: "aa80"`, check.Commentf("Incorrect error msg: %s", body))
 }

 func (s *DockerSuite) TestContainerApiCreate(c *check.C) {
@@ -700,7 +700,7 @@ func (s *DockerSuite) TestContainerApiInvalidPortSyntax(c *check.C) {

 	b, err := readBody(body)
 	c.Assert(err, checker.IsNil)
-	c.Assert(string(b[:]), checker.Contains, "Invalid port")
+	c.Assert(string(b[:]), checker.Contains, "invalid port")
 }

 // Issue 7941 - test to make sure a "null" in JSON is just ignored.
@@ -2742,3 +2742,28 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) {
 	out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls")
 	c.Assert(err, check.IsNil, check.Commentf(out))
 }
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithAutoRemoveContainer(c *check.C) {
+	err := s.d.StartWithBusybox()
+	c.Assert(err, checker.IsNil)
+
+	// top1 will exist after daemon restarts
+	out, err := s.d.Cmd("run", "-d", "--name", "top1", "busybox:latest", "top")
+	c.Assert(err, checker.IsNil, check.Commentf("run top1: %v", out))
+	// top2 will be removed after daemon restarts
+	out, err = s.d.Cmd("run", "-d", "--rm", "--name", "top2", "busybox:latest", "top")
+	c.Assert(err, checker.IsNil, check.Commentf("run top2: %v", out))
+
+	out, err = s.d.Cmd("ps")
+	c.Assert(out, checker.Contains, "top1", check.Commentf("top1 should be running"))
+	c.Assert(out, checker.Contains, "top2", check.Commentf("top2 should be running"))
+
+	// now restart daemon gracefully
+	err = s.d.Restart()
+	c.Assert(err, checker.IsNil)
+
+	out, err = s.d.Cmd("ps", "-a")
+	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
+	c.Assert(out, checker.Contains, "top1", check.Commentf("top1 should exist after daemon restarts"))
+	c.Assert(out, checker.Not(checker.Contains), "top2", check.Commentf("top2 should be removed after daemon restarts"))
+}
@@ -240,3 +240,15 @@ func (s *DockerSuite) TestRestartContainerwithRestartPolicy(c *check.C) {
 	dockerCmd(c, "start", id1)
 	dockerCmd(c, "start", id2)
 }
+
+func (s *DockerSuite) TestRestartAutoRmoveContainer(c *check.C) {
+	out, _ := runSleepingContainer(c, "--rm")
+
+	id := strings.TrimSpace(string(out))
+	dockerCmd(c, "restart", id)
+	err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second)
+	c.Assert(err, checker.IsNil)
+
+	out, _ = dockerCmd(c, "ps")
+	c.Assert(out, checker.Contains, id[:12], check.Commentf("container should be restarted instead of removed: %v", out))
+}
@@ -4498,6 +4498,15 @@ func (s *DockerSuite) TestRunAddHostInHostMode(c *check.C) {
 	c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out))
 }

+func (s *DockerSuite) TestRunRmAndWait(c *check.C) {
+	dockerCmd(c, "run", "--name=test", "--rm", "-d", "busybox", "sh", "-c", "sleep 3;exit 2")
+
+	out, code, err := dockerCmdWithError("wait", "test")
+	c.Assert(err, checker.IsNil, check.Commentf("out: %s; exit code: %d", out, code))
+	c.Assert(out, checker.Equals, "2\n", check.Commentf("exit code: %d", code))
+	c.Assert(code, checker.Equals, 0)
+}
+
 // Test case for #23498
 func (s *DockerSuite) TestRunUnsetEntrypoint(c *check.C) {
 	testRequires(c, DaemonIsLinux)
@@ -468,7 +468,9 @@ its root filesystem mounted as read only prohibiting any writes.
    Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped).

 **--rm**=*true*|*false*
-   Automatically remove the container when it exits (incompatible with -d). The default is *false*.
+   Automatically remove the container when it exits. The default is *false*.
+   `--rm` flag can work together with `-d`, and auto-removal will be done on daemon side. Note that it's
+   incompatible with any restart policy other than `none`.

 **--security-opt**=[]
    Security Options
@@ -103,6 +103,7 @@ type ContainerOptions struct {
 	flHealthTimeout time.Duration
 	flHealthRetries int
 	flRuntime       string
+	flAutoRemove    bool

 	Image string
 	Args  []string
@@ -163,6 +164,7 @@ func AddFlags(flags *pflag.FlagSet) *ContainerOptions {
 	flags.Var(copts.flUlimits, "ulimit", "Ulimit options")
 	flags.StringVarP(&copts.flUser, "user", "u", "", "Username or UID (format: <name|uid>[:<group|gid>])")
 	flags.StringVarP(&copts.flWorkingDir, "workdir", "w", "", "Working directory inside the container")
+	flags.BoolVar(&copts.flAutoRemove, "rm", false, "Automatically remove the container when it exits")

 	// Security
 	flags.Var(&copts.flCapAdd, "cap-add", "Add Linux capabilities")
@@ -553,6 +555,7 @@ func Parse(flags *pflag.FlagSet, copts *ContainerOptions) (*container.Config, *c
 		Binds:           binds,
 		ContainerIDFile: copts.flContainerIDFile,
 		OomScoreAdj:     copts.flOomScoreAdj,
+		AutoRemove:      copts.flAutoRemove,
 		Privileged:      copts.flPrivileged,
 		PortBindings:    portBindings,
 		Links:           copts.flLinks.GetAll(),