
Merge pull request #8062 from vishh/run_in_phase2

Add support for 'docker exec' - phase 2
Alexandr Morozov 2014-09-16 23:56:12 +04:00
commit 00fd008170
21 changed files with 936 additions and 55 deletions


@@ -625,7 +625,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
v.Set("stderr", "1")
cErr = utils.Go(func() error {
-return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil)
+return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil)
})
}
@@ -653,7 +653,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
if *openStdin || *attach {
if tty && cli.isTerminal {
-if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
+if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
log.Errorf("Error monitoring TTY size: %s", err)
}
}
@@ -1805,7 +1805,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
)
if tty && cli.isTerminal {
-if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
+if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
log.Debugf("Error monitoring TTY size: %s", err)
}
}
@@ -1827,7 +1827,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
defer signal.StopCatch(sigc)
}
-if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil); err != nil {
+if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil); err != nil {
return err
}
@@ -2109,7 +2109,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
}
errCh = utils.Go(func() error {
-return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked)
+return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil)
})
} else {
close(hijacked)
@@ -2136,7 +2136,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
}
if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal {
-if err := cli.monitorTtySize(runResult.Get("Id")); err != nil {
+if err := cli.monitorTtySize(runResult.Get("Id"), false); err != nil {
log.Errorf("Error monitoring TTY size: %s", err)
}
}
@@ -2299,3 +2299,101 @@ func (cli *DockerCli) CmdLoad(args ...string) error {
}
return nil
}
func (cli *DockerCli) CmdExec(args ...string) error {
cmd := cli.Subcmd("exec", "CONTAINER COMMAND [ARG...]", "Run a command in an existing container")
execConfig, err := runconfig.ParseExec(cmd, args)
if err != nil {
return err
}
if execConfig.Container == "" {
cmd.Usage()
return nil
}
stream, _, err := cli.call("POST", "/containers/"+execConfig.Container+"/exec", execConfig, false)
if err != nil {
return err
}
var execResult engine.Env
if err := execResult.Decode(stream); err != nil {
return err
}
execID := execResult.Get("Id")
if execID == "" {
fmt.Fprintf(cli.out, "exec ID empty")
return nil
}
if execConfig.Detach {
if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execConfig, false)); err != nil {
return err
}
return nil
}
// Interactive exec requested.
var (
out, stderr io.Writer
in io.ReadCloser
hijacked = make(chan io.Closer)
errCh chan error
)
// Block the return until the chan gets closed
defer func() {
log.Debugf("End of CmdExec(), Waiting for hijack to finish.")
if _, ok := <-hijacked; ok {
log.Errorf("Hijack did not finish (chan still open)")
}
}()
if execConfig.AttachStdin {
in = cli.in
}
if execConfig.AttachStdout {
out = cli.out
}
if execConfig.AttachStderr {
if execConfig.Tty {
stderr = cli.out
} else {
stderr = cli.err
}
}
errCh = utils.Go(func() error {
return cli.hijack("POST", "/exec/"+execID+"/start", execConfig.Tty, in, out, stderr, hijacked, execConfig)
})
// Acknowledge the hijack before starting
select {
case closer := <-hijacked:
// Make sure that the hijack gets closed when returning (results
// in closing the hijack chan and freeing the server's goroutines).
if closer != nil {
defer closer.Close()
}
case err := <-errCh:
if err != nil {
log.Debugf("Error hijack: %s", err)
return err
}
}
if execConfig.Tty && cli.isTerminal {
if err := cli.monitorTtySize(execID, true); err != nil {
log.Errorf("Error monitoring TTY size: %s", err)
}
}
if err := <-errCh; err != nil {
log.Debugf("Error hijack: %s", err)
return err
}
return nil
}


@@ -25,14 +25,18 @@ func (cli *DockerCli) dial() (net.Conn, error) {
return net.Dial(cli.proto, cli.addr)
}
-func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
+func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer, data interface{}) error {
defer func() {
if started != nil {
close(started)
}
}()
-req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), nil)
+params, err := cli.encodeData(data)
+if err != nil {
+return err
+}
+req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params)
if err != nil {
return err
}


@@ -40,24 +40,31 @@ func (cli *DockerCli) HTTPClient() *http.Client {
return &http.Client{Transport: tr}
}
-func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) {
+func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) {
params := bytes.NewBuffer(nil)
if data != nil {
if env, ok := data.(engine.Env); ok {
if err := env.Encode(params); err != nil {
-return nil, -1, err
+return nil, err
}
} else {
buf, err := json.Marshal(data)
if err != nil {
-return nil, -1, err
+return nil, err
}
if _, err := params.Write(buf); err != nil {
+return nil, err
+}
+}
+}
+return params, nil
+}
+func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) {
+params, err := cli.encodeData(data)
+if err != nil {
return nil, -1, err
}
-}
-}
req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params)
if err != nil {
return nil, -1, err
@@ -108,6 +115,7 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b
}
return nil, resp.StatusCode, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body))
}
return resp.Body, resp.StatusCode, nil
}
@@ -172,7 +180,7 @@ func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in
return nil
}
-func (cli *DockerCli) resizeTty(id string) {
+func (cli *DockerCli) resizeTty(id string, isExec bool) {
height, width := cli.getTtySize()
if height == 0 && width == 0 {
return
@@ -180,7 +188,15 @@ func (cli *DockerCli) resizeTty(id string) {
v := url.Values{}
v.Set("h", strconv.Itoa(height))
v.Set("w", strconv.Itoa(width))
-if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
+path := ""
+if !isExec {
+path = "/containers/" + id + "/resize?"
+} else {
+path = "/exec/" + id + "/resize?"
+}
+if _, _, err := readBody(cli.call("POST", path+v.Encode(), nil, false)); err != nil {
log.Debugf("Error resize: %s", err)
}
}
@@ -219,14 +235,14 @@ func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
return state.GetBool("Running"), state.GetInt("ExitCode"), nil
}
-func (cli *DockerCli) monitorTtySize(id string) error {
-cli.resizeTty(id)
+func (cli *DockerCli) monitorTtySize(id string, isExec bool) error {
+cli.resizeTty(id, isExec)
sigchan := make(chan os.Signal, 1)
gosignal.Notify(sigchan, syscall.SIGWINCH)
go func() {
for _ = range sigchan {
-cli.resizeTty(id)
+cli.resizeTty(id, isExec)
}
}()
return nil


@@ -663,6 +663,7 @@ func postContainersCreate(eng *engine.Engine, version version.Version, w http.Re
}
out.Set("Id", engine.Tail(stdoutBuffer, 1))
out.SetList("Warnings", outWarnings)
return writeJSON(w, http.StatusCreated, out)
}
@@ -1026,6 +1027,103 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp
return nil
}
func postContainerExecCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return nil
}
var (
out engine.Env
name = vars["name"]
job = eng.Job("execCreate", name)
stdoutBuffer = bytes.NewBuffer(nil)
)
if err := job.DecodeEnv(r.Body); err != nil {
return err
}
job.Stdout.Add(stdoutBuffer)
// Register an instance of Exec in container.
if err := job.Run(); err != nil {
fmt.Fprintf(os.Stderr, "Error setting up exec command in container %s: %s\n", name, err)
return err
}
// Return the ID
out.Set("Id", engine.Tail(stdoutBuffer, 1))
return writeJSON(w, http.StatusCreated, out)
}
func postContainerExecStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return nil
}
var (
name = vars["name"]
job = eng.Job("execStart", name)
errOut io.Writer = os.Stderr
)
if err := job.DecodeEnv(r.Body); err != nil {
return err
}
if !job.GetenvBool("Detach") {
// Setting up the streaming http interface.
inStream, outStream, err := hijackServer(w)
if err != nil {
return err
}
defer func() {
if tcpc, ok := inStream.(*net.TCPConn); ok {
tcpc.CloseWrite()
} else {
inStream.Close()
}
}()
defer func() {
if tcpc, ok := outStream.(*net.TCPConn); ok {
tcpc.CloseWrite()
} else if closer, ok := outStream.(io.Closer); ok {
closer.Close()
}
}()
var errStream io.Writer
fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
if !job.GetenvBool("Tty") && version.GreaterThanOrEqualTo("1.6") {
errStream = utils.NewStdWriter(outStream, utils.Stderr)
outStream = utils.NewStdWriter(outStream, utils.Stdout)
} else {
errStream = outStream
}
job.Stdin.Add(inStream)
job.Stdout.Add(outStream)
job.Stderr.Set(errStream)
errOut = outStream
}
// Now run the user process in container.
job.SetCloseIO(false)
if err := job.Run(); err != nil {
fmt.Fprintf(errOut, "Error starting exec command in container %s: %s\n", name, err)
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
func postContainerExecResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
if err := eng.Job("execResize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil {
return err
}
return nil
}
func optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.WriteHeader(http.StatusOK)
return nil
@@ -1148,6 +1246,9 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion st
"/containers/{name:.*}/resize": postContainersResize,
"/containers/{name:.*}/attach": postContainersAttach,
"/containers/{name:.*}/copy": postContainersCopy,
+"/containers/{name:.*}/exec": postContainerExecCreate,
+"/exec/{name:.*}/start": postContainerExecStart,
+"/exec/{name:.*}/resize": postContainerExecResize,
},
"DELETE": {
"/containers/{name:.*}": deleteContainers,
@@ -1334,6 +1435,7 @@ func ListenAndServe(proto, addr string, job *engine.Job) error {
return err
}
}
}
if err := os.Chmod(addr, 0660); err != nil {
return err


@@ -407,7 +407,7 @@ func (b *Builder) run(c *daemon.Container) error {
// FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach
// but without hijacking for stdin. Also, with attach there can be race
// condition because of some output already was printed before it.
-return <-b.Daemon.Attach(c, nil, nil, b.OutStream, b.ErrStream)
+return <-b.Daemon.Attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, nil, b.OutStream, b.ErrStream)
})
}


@@ -103,7 +103,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
cStderr = job.Stderr
}
-<-daemon.Attach(container, cStdin, cStdinCloser, cStdout, cStderr)
+<-daemon.Attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdinCloser, cStdout, cStderr)
// If we are in stdinonce mode, wait for the process to end
// otherwise, simply return
if container.Config.StdinOnce && !container.Config.Tty {
@@ -119,23 +119,25 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
// Attach and ContainerAttach.
//
// This method is in use by builder/builder.go.
-func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
+func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error {
var (
cStdout, cStderr io.ReadCloser
nJobs int
errors = make(chan error, 3)
)
-if stdin != nil && container.Config.OpenStdin {
+// Connect stdin of container to the http conn.
+if stdin != nil && openStdin {
nJobs++
-if cStdin, err := container.StdinPipe(); err != nil {
+// Get the stdin pipe.
+if cStdin, err := streamConfig.StdinPipe(); err != nil {
errors <- err
} else {
go func() {
log.Debugf("attach: stdin: begin")
defer log.Debugf("attach: stdin: end")
// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
-if container.Config.StdinOnce && !container.Config.Tty {
+if stdinOnce && !tty {
defer cStdin.Close()
} else {
defer func() {
@@ -147,10 +149,11 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
}
}()
}
-if container.Config.Tty {
+if tty {
_, err = utils.CopyEscapable(cStdin, stdin)
} else {
_, err = io.Copy(cStdin, stdin)
}
if err == io.ErrClosedPipe {
err = nil
@@ -164,7 +167,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
}
if stdout != nil {
nJobs++
-if p, err := container.StdoutPipe(); err != nil {
+// Get a reader end of a pipe that is attached as stdout to the container.
+if p, err := streamConfig.StdoutPipe(); err != nil {
errors <- err
} else {
cStdout = p
@@ -172,7 +176,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
log.Debugf("attach: stdout: begin")
defer log.Debugf("attach: stdout: end")
// If we are in StdinOnce mode, then close stdin
-if container.Config.StdinOnce && stdin != nil {
+if stdinOnce && stdin != nil {
defer stdin.Close()
}
if stdinCloser != nil {
@@ -189,11 +193,12 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
}()
}
} else {
+// Point stdout of container to a no-op writer.
go func() {
if stdinCloser != nil {
defer stdinCloser.Close()
}
-if cStdout, err := container.StdoutPipe(); err != nil {
+if cStdout, err := streamConfig.StdoutPipe(); err != nil {
log.Errorf("attach: stdout pipe: %s", err)
} else {
io.Copy(&ioutils.NopWriter{}, cStdout)
@@ -202,7 +207,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
}
if stderr != nil {
nJobs++
-if p, err := container.StderrPipe(); err != nil {
+if p, err := streamConfig.StderrPipe(); err != nil {
errors <- err
} else {
cStderr = p
@@ -210,7 +215,8 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
log.Debugf("attach: stderr: begin")
defer log.Debugf("attach: stderr: end")
// If we are in StdinOnce mode, then close stdin
-if container.Config.StdinOnce && stdin != nil {
+// Why are we closing stdin here and above while handling stdout?
+if stdinOnce && stdin != nil {
defer stdin.Close()
}
if stdinCloser != nil {
@@ -227,12 +233,13 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
}()
}
} else {
+// Point stderr at a no-op writer.
go func() {
if stdinCloser != nil {
defer stdinCloser.Close()
}
-if cStderr, err := container.StderrPipe(); err != nil {
+if cStderr, err := streamConfig.StderrPipe(); err != nil {
log.Errorf("attach: stdout pipe: %s", err)
} else {
io.Copy(&ioutils.NopWriter{}, cStderr)


@@ -88,6 +88,7 @@ type Container struct {
activeLinks map[string]*links.Link
monitor *containerMonitor
+execCommands *execStore
}
func (container *Container) FromDisk() error {


@@ -85,6 +85,7 @@ type Daemon struct {
repository string
sysInitPath string
containers *contStore
+execCommands *execStore
graph *graph.Graph
repositories *graph.TagStore
idIndex *truncindex.TruncIndex
@@ -122,6 +123,9 @@ func (daemon *Daemon) Install(eng *engine.Engine) error {
"unpause": daemon.ContainerUnpause,
"wait": daemon.ContainerWait,
"image_delete": daemon.ImageDelete, // FIXME: see above
+"execCreate": daemon.ContainerExecCreate,
+"execStart": daemon.ContainerExecStart,
+"execResize": daemon.ContainerExecResize,
} {
if err := eng.Register(name, method); err != nil {
return err
@@ -496,17 +500,17 @@ func (daemon *Daemon) generateHostname(id string, config *runconfig.Config) {
}
}
-func (daemon *Daemon) getEntrypointAndArgs(config *runconfig.Config) (string, []string) {
+func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint, configCmd []string) (string, []string) {
var (
entrypoint string
args []string
)
-if len(config.Entrypoint) != 0 {
-entrypoint = config.Entrypoint[0]
-args = append(config.Entrypoint[1:], config.Cmd...)
+if len(configEntrypoint) != 0 {
+entrypoint = configEntrypoint[0]
+args = append(configEntrypoint[1:], configCmd...)
} else {
-entrypoint = config.Cmd[0]
-args = config.Cmd[1:]
+entrypoint = configCmd[0]
+args = configCmd[1:]
}
return entrypoint, args
}
@@ -522,7 +526,7 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *i
}
daemon.generateHostname(id, config)
-entrypoint, args := daemon.getEntrypointAndArgs(config)
+entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd)
container := &Container{
// FIXME: we should generate the ID here instead of receiving it as an argument
@@ -538,6 +542,7 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *i
Driver: daemon.driver.String(),
ExecDriver: daemon.execDriver.Name(),
State: NewState(),
+execCommands: newExecStore(),
}
container.root = daemon.containerRoot(container.ID)
@@ -846,6 +851,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error)
daemon := &Daemon{
repository: daemonRepo,
containers: &contStore{s: make(map[string]*Container)},
+execCommands: newExecStore(),
graph: g,
repositories: repositories,
idIndex: truncindex.NewTruncIndex([]string{}),

daemon/exec.go (new file, 294 lines)

@@ -0,0 +1,294 @@
// +build linux
package daemon
import (
"fmt"
"io"
"io/ioutil"
"sync"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/engine"
"github.com/docker/docker/pkg/broadcastwriter"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/log"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
)
type execConfig struct {
sync.Mutex
ID string
Running bool
ProcessConfig execdriver.ProcessConfig
StreamConfig
OpenStdin bool
OpenStderr bool
OpenStdout bool
Container *Container
}
type execStore struct {
s map[string]*execConfig
sync.Mutex
}
func newExecStore() *execStore {
return &execStore{s: make(map[string]*execConfig, 0)}
}
func (e *execStore) Add(id string, execConfig *execConfig) {
e.Lock()
e.s[id] = execConfig
e.Unlock()
}
func (e *execStore) Get(id string) *execConfig {
e.Lock()
res := e.s[id]
e.Unlock()
return res
}
func (e *execStore) Delete(id string) {
e.Lock()
delete(e.s, id)
e.Unlock()
}
func (execConfig *execConfig) Resize(h, w int) error {
return execConfig.ProcessConfig.Terminal.Resize(h, w)
}
func (d *Daemon) registerExecCommand(execConfig *execConfig) {
// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
execConfig.Container.execCommands.Add(execConfig.ID, execConfig)
// Storing execs in daemon for easy access via remote API.
d.execCommands.Add(execConfig.ID, execConfig)
}
func (d *Daemon) getExecConfig(name string) (*execConfig, error) {
if execConfig := d.execCommands.Get(name); execConfig != nil {
if !execConfig.Container.IsRunning() {
return nil, fmt.Errorf("Container %s is not not running", execConfig.Container.ID)
}
return execConfig, nil
}
return nil, fmt.Errorf("No exec '%s' in found in daemon", name)
}
func (d *Daemon) unregisterExecCommand(execConfig *execConfig) {
execConfig.Container.execCommands.Delete(execConfig.ID)
d.execCommands.Delete(execConfig.ID)
}
func (d *Daemon) getActiveContainer(name string) (*Container, error) {
container := d.Get(name)
if container == nil {
return nil, fmt.Errorf("No such container: %s", name)
}
if !container.IsRunning() {
return nil, fmt.Errorf("Container %s is not not running", name)
}
return container, nil
}
func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status {
if len(job.Args) != 1 {
return job.Errorf("Usage: %s [options] container command [args]", job.Name)
}
var name = job.Args[0]
container, err := d.getActiveContainer(name)
if err != nil {
return job.Error(err)
}
config := runconfig.ExecConfigFromJob(job)
entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd)
processConfig := execdriver.ProcessConfig{
Privileged: config.Privileged,
User: config.User,
Tty: config.Tty,
Entrypoint: entrypoint,
Arguments: args,
}
execConfig := &execConfig{
ID: utils.GenerateRandomID(),
OpenStdin: config.AttachStdin,
OpenStdout: config.AttachStdout,
OpenStderr: config.AttachStderr,
StreamConfig: StreamConfig{},
ProcessConfig: processConfig,
Container: container,
Running: false,
}
d.registerExecCommand(execConfig)
job.Printf("%s\n", execConfig.ID)
return engine.StatusOK
}
func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
if len(job.Args) != 1 {
return job.Errorf("Usage: %s [options] exec", job.Name)
}
var (
cStdin io.ReadCloser
cStdout, cStderr io.Writer
cStdinCloser io.Closer
execName = job.Args[0]
)
execConfig, err := d.getExecConfig(execName)
if err != nil {
return job.Error(err)
}
func() {
execConfig.Lock()
defer execConfig.Unlock()
if execConfig.Running {
err = fmt.Errorf("Error: Exec command %s is already running", execName)
}
execConfig.Running = true
}()
if err != nil {
return job.Error(err)
}
log.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID)
container := execConfig.Container
if execConfig.OpenStdin {
r, w := io.Pipe()
go func() {
defer w.Close()
io.Copy(w, job.Stdin)
}()
cStdin = r
cStdinCloser = job.Stdin
}
if execConfig.OpenStdout {
cStdout = job.Stdout
}
if execConfig.OpenStderr {
cStderr = job.Stderr
}
execConfig.StreamConfig.stderr = broadcastwriter.New()
execConfig.StreamConfig.stdout = broadcastwriter.New()
// Attach to stdin
if execConfig.OpenStdin {
execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe()
} else {
execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
}
attachErr := d.Attach(&execConfig.StreamConfig, execConfig.OpenStdin, false, execConfig.ProcessConfig.Tty, cStdin, cStdinCloser, cStdout, cStderr)
execErr := make(chan error)
// Remove exec from daemon and container.
defer d.unregisterExecCommand(execConfig)
go func() {
err := container.Exec(execConfig)
if err != nil {
execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err)
}
}()
select {
case err := <-attachErr:
if err != nil {
return job.Errorf("attach failed with error: %s", err)
}
break
case err := <-execErr:
return job.Error(err)
}
return engine.StatusOK
}
func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
return d.execDriver.Exec(c.command, &execConfig.ProcessConfig, pipes, startCallback)
}
func (container *Container) Exec(execConfig *execConfig) error {
container.Lock()
defer container.Unlock()
waitStart := make(chan struct{})
callback := func(processConfig *execdriver.ProcessConfig, pid int) {
if processConfig.Tty {
// The callback is called after the process Start()
// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
// which we close here.
if c, ok := processConfig.Stdout.(io.Closer); ok {
c.Close()
}
}
close(waitStart)
}
// We use a callback here instead of a goroutine and a chan for
// synchronization purposes
cErr := utils.Go(func() error { return container.monitorExec(execConfig, callback) })
// Exec should not return until the process is actually running
select {
case <-waitStart:
case err := <-cErr:
return err
}
return nil
}
func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error {
var (
err error
exitCode int
)
pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin)
exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback)
if err != nil {
log.Errorf("Error running command in existing container %s: %s", container.ID, err)
}
log.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
if execConfig.OpenStdin {
if err := execConfig.StreamConfig.stdin.Close(); err != nil {
log.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
}
}
if err := execConfig.StreamConfig.stdout.Clean(); err != nil {
log.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
}
if err := execConfig.StreamConfig.stderr.Clean(); err != nil {
log.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
}
if execConfig.ProcessConfig.Terminal != nil {
if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
log.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
}
}
return err
}


@@ -42,6 +42,8 @@ type TtyTerminal interface {
type Driver interface {
Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code
+// Exec executes the process in an existing container, blocks until the process exits and returns the exit code
+Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, startCallback StartCallback) (int, error)
Kill(c *Command, sig int) error
Pause(c *Command) error
Unpause(c *Command) error


@@ -527,3 +527,7 @@ func (t *TtyConsole) Close() error {
t.SlavePty.Close()
return t.MasterPty.Close()
}
+func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
+return -1, fmt.Errorf("Unsupported: Exec is not supported by the lxc driver")
+}


@@ -22,6 +22,7 @@ import (
"github.com/docker/libcontainer/cgroups/systemd"
consolepkg "github.com/docker/libcontainer/console"
"github.com/docker/libcontainer/namespaces"
+_ "github.com/docker/libcontainer/namespaces/nsenter"
"github.com/docker/libcontainer/system"
)


@@ -0,0 +1,70 @@
// +build linux
package native
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"github.com/docker/docker/daemon/execdriver"
"github.com/docker/docker/reexec"
"github.com/docker/libcontainer"
"github.com/docker/libcontainer/namespaces"
)
const execCommandName = "nsenter-exec"
func init() {
reexec.Register(execCommandName, nsenterExec)
}
func nsenterExec() {
runtime.LockOSThread()
// User args are passed after '--' in the command line.
userArgs := findUserArgs()
config, err := loadConfigFromFd()
if err != nil {
log.Fatalf("docker-exec: unable to receive config from sync pipe: %s", err)
}
if err := namespaces.FinalizeSetns(config, userArgs); err != nil {
log.Fatalf("docker-exec: failed to exec: %s", err)
}
}
// TODO(vishh): Add support for running in privileged mode and running as a different user.
func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
active := d.activeContainers[c.ID]
if active == nil {
return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
}
state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
if err != nil {
return -1, fmt.Errorf("State unavailable for container with ID %s. The container may have been cleaned up already. Error: %s", c.ID, err)
}
var term execdriver.Terminal
if processConfig.Tty {
term, err = NewTtyConsole(processConfig, pipes)
} else {
term, err = execdriver.NewStdConsole(processConfig, pipes)
}
processConfig.Terminal = term
args := append([]string{processConfig.Entrypoint}, processConfig.Arguments...)
return namespaces.ExecIn(active.container, state, args, os.Args[0], "exec", processConfig.Stdin, processConfig.Stdout, processConfig.Stderr, processConfig.Console,
func(cmd *exec.Cmd) {
if startCallback != nil {
startCallback(&c.ProcessConfig, cmd.Process.Pid)
}
})
}


@@ -0,0 +1,35 @@
// +build linux
package native
import (
"os"
"github.com/docker/libcontainer"
"github.com/docker/libcontainer/syncpipe"
)
func findUserArgs() []string {
for i, a := range os.Args {
if a == "--" {
return os.Args[i+1:]
}
}
return []string{}
}
// loadConfigFromFd loads a container's config from the sync pipe that is provided by
// fd 3 when running a process
func loadConfigFromFd() (*libcontainer.Config, error) {
syncPipe, err := syncpipe.NewSyncPipeFromFd(0, 3)
if err != nil {
return nil, err
}
var config *libcontainer.Config
if err := syncPipe.ReadFromParent(&config); err != nil {
return nil, err
}
return config, nil
}


@@ -19,6 +19,7 @@ func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status {
if err != nil {
return job.Error(err)
}
if container := daemon.Get(name); container != nil {
if err := container.Resize(height, width); err != nil {
return job.Error(err)
@@ -27,3 +28,26 @@ func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status {
}
return job.Errorf("No such container: %s", name)
}
func (daemon *Daemon) ContainerExecResize(job *engine.Job) engine.Status {
if len(job.Args) != 3 {
return job.Errorf("Not enough arguments. Usage: %s EXEC HEIGHT WIDTH\n", job.Name)
}
name := job.Args[0]
height, err := strconv.Atoi(job.Args[1])
if err != nil {
return job.Error(err)
}
width, err := strconv.Atoi(job.Args[2])
if err != nil {
return job.Error(err)
}
execConfig, err := daemon.getExecConfig(name)
if err != nil {
return job.Error(err)
}
if err := execConfig.Resize(height, width); err != nil {
return job.Error(err)
}
return engine.StatusOK
}

docs/man/docker-exec.md (new file, 29 lines)

@@ -0,0 +1,29 @@
% DOCKER(1) Docker User Manuals
% Docker Community
% SEPT 2014
# NAME
docker-exec - Run a command in an existing container
# SYNOPSIS
**docker exec**
[**-d**|**--detach**[=*false*]]
[**-i**|**--interactive**[=*false*]]
[**-t**|**--tty**[=*false*]]
CONTAINER COMMAND [ARG...]
# DESCRIPTION
Run a process in an existing container. The existing CONTAINER needs to be active.
# OPTIONS
**-d**, **--detach**=*true*|*false*
Detached mode. This runs the new process in the background.
**-i**, **--interactive**=*true*|*false*
When set to true, keep STDIN open even if not attached. The default is false.
**-t**, **--tty**=*true*|*false*
When set to true, Docker can allocate a pseudo-TTY and attach to the standard
input of the process. This can be used, for example, to run a throwaway
interactive shell. The default value is false.


@@ -1295,6 +1295,35 @@ It is even useful to cherry-pick particular tags of an image repository
$ sudo docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy
## exec
Usage: docker exec CONTAINER COMMAND [ARG...]
Run a command in an existing container
-d, --detach=false Detached mode: run the process in the background and exit
-i, --interactive=false Keep STDIN open even if not attached
-t, --tty=false Allocate a pseudo-TTY
The `docker exec` command runs a user-specified command as a new process in an existing,
user-specified container. The container needs to be active.
The `docker exec` command will typically be used after `docker run`.
### Examples:
$ sudo docker run --name ubuntu_bash --rm -i -t ubuntu bash
This will create a container named 'ubuntu_bash' and start a bash session.
$ sudo docker exec -d ubuntu_bash touch /tmp/execWorks
This will create a new file '/tmp/execWorks' inside the existing and active container
'ubuntu_bash', in the background.
$ sudo docker exec -it ubuntu_bash bash
This will create a new bash session in the container 'ubuntu_bash'.
## search


@@ -122,6 +122,7 @@ func (eng *Engine) Job(name string, args ...string) *Job {
Stdout: NewOutput(),
Stderr: NewOutput(),
env: &Env{},
+closeIO: true,
}
if eng.Logging {
job.Stderr.Add(ioutils.NopWriteCloser(eng.Stderr))


@@ -31,6 +31,7 @@ type Job struct {
handler Handler
status Status
end time.Time
+closeIO bool
}
type Status int
@@ -78,6 +79,7 @@ func (job *Job) Run() error {
job.status = job.handler(job)
job.end = time.Now()
}
+if job.closeIO {
// Wait for all background tasks to complete
if err := job.Stdout.Close(); err != nil {
return err
@@ -88,9 +90,11 @@ func (job *Job) Run() error {
if err := job.Stdin.Close(); err != nil {
return err
}
+}
if job.status != 0 {
return fmt.Errorf("%s", Tail(errorMessage, 1))
}
return nil
}
@@ -228,3 +232,7 @@ func (job *Job) Error(err error) Status {
func (job *Job) StatusCode() int {
return int(job.status)
}
func (job *Job) SetCloseIO(val bool) {
job.closeIO = val
}


@@ -1994,3 +1994,78 @@ func TestRunPortInUse(t *testing.T) {
deleteAllContainers()
logDone("run - fail if port already in use")
}
// "test" should be printed by docker exec
func TestDockerExec(t *testing.T) {
runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100")
out, _, _, err := runCommandWithStdoutStderr(runCmd)
errorOut(err, t, out)
execCmd := exec.Command(dockerBinary, "exec", "testing", "cat", "/tmp/file")
out, _, err = runCommandWithOutput(execCmd)
errorOut(err, t, out)
out = strings.Trim(out, "\r\n")
if expected := "test"; out != expected {
t.Errorf("container exec should've printed %q but printed %q", expected, out)
}
deleteAllContainers()
logDone("exec - basic test")
}
// "test" should be printed by docker exec
func TestDockerExecInteractive(t *testing.T) {
runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100")
out, _, _, err := runCommandWithStdoutStderr(runCmd)
errorOut(err, t, out)
execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh")
stdin, err := execCmd.StdinPipe()
if err != nil {
t.Fatal(err)
}
stdout, err := execCmd.StdoutPipe()
if err != nil {
t.Fatal(err)
}
if err := execCmd.Start(); err != nil {
t.Fatal(err)
}
if _, err := stdin.Write([]byte("cat /tmp/file\n")); err != nil {
t.Fatal(err)
}
r := bufio.NewReader(stdout)
line, err := r.ReadString('\n')
if err != nil {
t.Fatal(err)
}
line = strings.TrimSpace(line)
if line != "test" {
t.Fatalf("Output should be 'test', got '%q'", line)
}
if err := stdin.Close(); err != nil {
t.Fatal(err)
}
finish := make(chan struct{})
go func() {
if err := execCmd.Wait(); err != nil {
t.Fatal(err)
}
close(finish)
}()
select {
case <-finish:
case <-time.After(1 * time.Second):
t.Fatal("docker exec failed to exit on stdin close")
}
deleteAllContainers()
logDone("exec - Interactive test")
}

runconfig/exec.go (new file, 75 lines)

@@ -0,0 +1,75 @@
package runconfig
import (
"github.com/docker/docker/engine"
flag "github.com/docker/docker/pkg/mflag"
)
type ExecConfig struct {
User string
Privileged bool
Tty bool
Container string
AttachStdin bool
AttachStderr bool
AttachStdout bool
Detach bool
Cmd []string
}
func ExecConfigFromJob(job *engine.Job) *ExecConfig {
execConfig := &ExecConfig{
User: job.Getenv("User"),
Privileged: job.GetenvBool("Privileged"),
Tty: job.GetenvBool("Tty"),
Container: job.Getenv("Container"),
AttachStdin: job.GetenvBool("AttachStdin"),
AttachStderr: job.GetenvBool("AttachStderr"),
AttachStdout: job.GetenvBool("AttachStdout"),
}
if cmd := job.GetenvList("Cmd"); cmd != nil {
execConfig.Cmd = cmd
}
return execConfig
}
func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) {
var (
flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached")
flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY")
flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background")
execCmd []string
container string
)
if err := cmd.Parse(args); err != nil {
return nil, err
}
parsedArgs := cmd.Args()
if len(parsedArgs) > 1 {
container = cmd.Arg(0)
execCmd = parsedArgs[1:]
}
execConfig := &ExecConfig{
// TODO(vishh): Expose '-u' flag once it is supported.
User: "",
// TODO(vishh): Expose '-p' flag once it is supported.
Privileged: false,
Tty: *flTty,
Cmd: execCmd,
Container: container,
Detach: *flDetach,
}
// If -d is not set, attach to everything by default
if !*flDetach {
execConfig.AttachStdout = true
execConfig.AttachStderr = true
if *flStdin {
execConfig.AttachStdin = true
}
}
return execConfig, nil
}
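
To make the defaulting at the end of ParseExec concrete, this is the value it produces for `docker exec -i -t ubuntu_bash bash`. It is a sketch derived from the logic above, written as a package-level variable that would compile inside the runconfig package, not output captured from the tool.

// Sketch: the ExecConfig produced by ParseExec for "docker exec -i -t ubuntu_bash bash".
// -d is not set, so stdout and stderr are attached by default; -i additionally
// attaches stdin; the first positional argument is the container, the rest the command.
var exampleExecConfig = &ExecConfig{
	User:         "",    // -u not exposed yet
	Privileged:   false, // -p not exposed yet
	Tty:          true,  // -t
	Container:    "ubuntu_bash",
	AttachStdin:  true, // -i and not detached
	AttachStderr: true, // default when not detached
	AttachStdout: true, // default when not detached
	Detach:       false,
	Cmd:          []string{"bash"},
}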