Refactor the execdriver.Command and Container structs to support 'docker exec' and similar features in the future.

Docker-DCO-1.1-Signed-off-by: Vishnu Kannan <vishnuk@google.com> (github: vishh)

Commit 4aa5da278f (parent 9ffb0f76f2)

15 changed files with 150 additions and 134 deletions
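The heart of the change: execdriver.Command used to embed os/exec.Cmd and mix container-level settings (rootfs, init path, network, cgroups) with process-level ones (entrypoint, tty, user). Process-level state now lives in its own ProcessConfig type, and Command carries exactly one ProcessConfig describing the init process — which is what lets a future 'docker exec' describe additional processes for the same container. A condensed sketch of the before/after shapes (field lists abridged and Terminal stubbed so the snippet is self-contained; see the hunks below for the real definitions):

	package execdriver

	import "os/exec"

	// Terminal is stubbed here; the real interface lives elsewhere in the package.
	type Terminal interface {
		Resize(h, w int) error
		Close() error
	}

	// Before: one struct described both the container and its single process:
	//
	//	type Command struct {
	//		exec.Cmd
	//		ID, Rootfs, Entrypoint string
	//		Tty                    bool
	//		Terminal               Terminal
	//	}

	// After: process-level state is its own type ...
	type ProcessConfig struct {
		exec.Cmd // the wrapped host process

		Privileged   bool
		User         string
		Tty          bool
		Entrypoint   string
		Arguments    []string
		Terminal     Terminal
		ContainerPid int // pid of the process inside the container
	}

	// ... and Command keeps only container-level state plus the init process.
	type Command struct {
		ID            string
		Rootfs        string
		InitPath      string
		ProcessConfig ProcessConfig // the container's init process
	}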
@@ -103,7 +103,6 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 	}
 	<-daemon.Attach(container, cStdin, cStdinCloser, cStdout, cStderr)
 	// If we are in stdinonce mode, wait for the process to end
 	// otherwise, simply return
 	if container.Config.StdinOnce && !container.Config.Tty {
@@ -128,7 +127,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 	if stdin != nil && container.Config.OpenStdin {
 		nJobs++
-		if cStdin, err := container.StdinPipe(); err != nil {
+		if cStdin, err := container.StdConfig.StdinPipe(); err != nil {
 			errors <- err
 		} else {
 			go func() {
@@ -164,7 +163,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 	}
 	if stdout != nil {
 		nJobs++
-		if p, err := container.StdoutPipe(); err != nil {
+		if p, err := container.StdConfig.StdoutPipe(); err != nil {
 			errors <- err
 		} else {
 			cStdout = p
@@ -193,7 +192,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 			if stdinCloser != nil {
 				defer stdinCloser.Close()
 			}
-			if cStdout, err := container.StdoutPipe(); err != nil {
+			if cStdout, err := container.StdConfig.StdoutPipe(); err != nil {
 				log.Errorf("attach: stdout pipe: %s", err)
 			} else {
 				io.Copy(&utils.NopWriter{}, cStdout)
@@ -202,7 +201,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 	}
 	if stderr != nil {
 		nJobs++
-		if p, err := container.StderrPipe(); err != nil {
+		if p, err := container.StdConfig.StderrPipe(); err != nil {
 			errors <- err
 		} else {
 			cStderr = p
@@ -232,7 +231,7 @@ func (daemon *Daemon) Attach(container *Container, stdin io.ReadCloser, stdinClo
 				defer stdinCloser.Close()
 			}
-			if cStderr, err := container.StderrPipe(); err != nil {
+			if cStderr, err := container.StdConfig.StderrPipe(); err != nil {
 				log.Errorf("attach: stdout pipe: %s", err)
 			} else {
 				io.Copy(&utils.NopWriter{}, cStderr)
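Every place the attach plumbing used to call container.StdinPipe()/StdoutPipe()/StderrPipe() now goes through container.StdConfig. Per the comment in the next file, the stdout/stderr helpers hand each caller a fresh pipe registered with a broadcast writer, so several attached clients each see a copy of the stream. A minimal, self-contained sketch of that fan-out pattern, with io.MultiWriter standing in for Docker's broadcastwriter (which additionally supports adding and removing writers at runtime):

	package main

	import (
		"fmt"
		"io"
		"strings"
		"sync"
	)

	func main() {
		// Two consumers, as if two clients ran `docker attach`.
		r1, w1 := io.Pipe()
		r2, w2 := io.Pipe()

		var wg sync.WaitGroup
		for i, r := range []io.Reader{r1, r2} {
			wg.Add(1)
			go func(i int, r io.Reader) {
				defer wg.Done()
				b, _ := io.ReadAll(r)
				fmt.Printf("consumer %d got: %q\n", i, string(b))
			}(i, r)
		}

		// The container's stdout is copied once; every registered
		// writer receives the same bytes.
		stdout := io.MultiWriter(w1, w2)
		io.Copy(stdout, strings.NewReader("hello from pid 1\n"))
		w1.Close()
		w2.Close()
		wg.Wait()
	}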
@@ -41,6 +41,13 @@ var (
 	ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.")
 )
 
+type StdConfig struct {
+	stdout    *broadcastwriter.BroadcastWriter
+	stderr    *broadcastwriter.BroadcastWriter
+	stdin     io.ReadCloser
+	stdinPipe io.WriteCloser
+}
+
 type Container struct {
 	*State
 	root string // Path to the "home" of the container, including metadata.
@@ -66,10 +73,7 @@ type Container struct {
 	ExecDriver string
 
 	command   *execdriver.Command
-	stdout    *broadcastwriter.BroadcastWriter
-	stderr    *broadcastwriter.BroadcastWriter
-	stdin     io.ReadCloser
-	stdinPipe io.WriteCloser
+	StdConfig StdConfig
 
 	daemon                   *Daemon
 	MountLabel, ProcessLabel string
@@ -247,26 +251,31 @@ func populateCommand(c *Container, env []string) error {
 		CpuShares: c.Config.CpuShares,
 		Cpuset:    c.Config.Cpuset,
 	}
 
+	processConfig := execdriver.ProcessConfig{
+		Privileged: c.hostConfig.Privileged,
+		Entrypoint: c.Path,
+		Arguments:  c.Args,
+		Tty:        c.Config.Tty,
+		User:       c.Config.User,
+	}
+	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
+	processConfig.Env = env
+
 	c.command = &execdriver.Command{
 		ID:                 c.ID,
-		Privileged:         c.hostConfig.Privileged,
 		Rootfs:             c.RootfsPath(),
 		InitPath:           "/.dockerinit",
-		Entrypoint:         c.Path,
-		Arguments:          c.Args,
 		WorkingDir:         c.Config.WorkingDir,
 		Network:            en,
-		Tty:                c.Config.Tty,
-		User:               c.Config.User,
 		Config:             context,
 		Resources:          resources,
 		AllowedDevices:     allowedDevices,
 		AutoCreatedDevices: autoCreatedDevices,
 		CapAdd:             c.hostConfig.CapAdd,
 		CapDrop:            c.hostConfig.CapDrop,
+		ProcessConfig:      processConfig,
 	}
-	c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
-	c.command.Env = env
 
 	return nil
 }
@@ -329,7 +338,7 @@ func (container *Container) Run() error {
 }
 
 func (container *Container) Output() (output []byte, err error) {
-	pipe, err := container.StdoutPipe()
+	pipe, err := container.StdConfig.StdoutPipe()
 	if err != nil {
 		return nil, err
 	}
@@ -342,7 +351,7 @@ func (container *Container) Output() (output []byte, err error) {
 	return output, err
 }
 
-// Container.StdinPipe returns a WriteCloser which can be used to feed data
+// StdConfig.StdinPipe returns a WriteCloser which can be used to feed data
 // to the standard input of the container's active process.
 // Container.StdoutPipe and Container.StderrPipe each return a ReadCloser
 // which can be used to retrieve the standard output (and error) generated
@@ -350,31 +359,31 @@ func (container *Container) Output() (output []byte, err error) {
 // copied and delivered to all StdoutPipe and StderrPipe consumers, using
 // a kind of "broadcaster".
 
-func (container *Container) StdinPipe() (io.WriteCloser, error) {
-	return container.stdinPipe, nil
+func (stdConfig *StdConfig) StdinPipe() (io.WriteCloser, error) {
+	return stdConfig.stdinPipe, nil
 }
 
-func (container *Container) StdoutPipe() (io.ReadCloser, error) {
+func (stdConfig *StdConfig) StdoutPipe() (io.ReadCloser, error) {
 	reader, writer := io.Pipe()
-	container.stdout.AddWriter(writer, "")
+	stdConfig.stdout.AddWriter(writer, "")
 	return utils.NewBufReader(reader), nil
 }
 
-func (container *Container) StderrPipe() (io.ReadCloser, error) {
+func (stdConfig *StdConfig) StderrPipe() (io.ReadCloser, error) {
 	reader, writer := io.Pipe()
-	container.stderr.AddWriter(writer, "")
+	stdConfig.stderr.AddWriter(writer, "")
 	return utils.NewBufReader(reader), nil
 }
 
 func (container *Container) StdoutLogPipe() io.ReadCloser {
 	reader, writer := io.Pipe()
-	container.stdout.AddWriter(writer, "stdout")
+	container.StdConfig.stdout.AddWriter(writer, "stdout")
 	return utils.NewBufReader(reader)
 }
 
 func (container *Container) StderrLogPipe() io.ReadCloser {
 	reader, writer := io.Pipe()
-	container.stderr.AddWriter(writer, "stderr")
+	container.StdConfig.stderr.AddWriter(writer, "stderr")
 	return utils.NewBufReader(reader)
 }
@@ -631,7 +640,7 @@ func (container *Container) Restart(seconds int) error {
 }
 
 func (container *Container) Resize(h, w int) error {
-	return container.command.Terminal.Resize(h, w)
+	return container.command.ProcessConfig.Terminal.Resize(h, w)
 }
 
 func (container *Container) ExportRw() (archive.Archive, error) {
@@ -815,7 +824,7 @@ func (container *Container) Exposes(p nat.Port) bool {
 }
 
 func (container *Container) GetPtyMaster() (*os.File, error) {
-	ttyConsole, ok := container.command.Terminal.(execdriver.TtyTerminal)
+	ttyConsole, ok := container.command.ProcessConfig.Terminal.(execdriver.TtyTerminal)
 	if !ok {
 		return nil, ErrNoTTY
 	}
@@ -1083,11 +1092,11 @@ func (container *Container) startLoggingToDisk() error {
 		return err
 	}
 
-	if err := container.daemon.LogToDisk(container.stdout, pth, "stdout"); err != nil {
+	if err := container.daemon.LogToDisk(container.StdConfig.stdout, pth, "stdout"); err != nil {
 		return err
 	}
 
-	if err := container.daemon.LogToDisk(container.stderr, pth, "stderr"); err != nil {
+	if err := container.daemon.LogToDisk(container.StdConfig.stderr, pth, "stderr"); err != nil {
 		return err
 	}
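populateCommand now builds the init-process description first and hangs it off the Command, instead of poking SysProcAttr and Env onto the Command afterwards. The same construction reduced to a runnable example — the types are local stand-ins and the values are hardcoded where the real code reads them from the container's config:

	package main

	import (
		"fmt"
		"os/exec"
		"syscall"
	)

	type ProcessConfig struct {
		exec.Cmd
		Privileged bool
		Entrypoint string
		Arguments  []string
		Tty        bool
		User       string
	}

	type Command struct {
		ID            string
		Rootfs        string
		ProcessConfig ProcessConfig
	}

	func main() {
		processConfig := ProcessConfig{
			Privileged: false,
			Entrypoint: "/bin/sh",
			Arguments:  []string{"-c", "echo hi"},
			Tty:        false,
			User:       "root",
		}
		// Process-level knobs that used to be set on Command now land on
		// the ProcessConfig (compare the removed c.command.SysProcAttr line).
		processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
		processConfig.Env = []string{"PATH=/usr/bin:/bin"}

		command := &Command{
			ID:            "example",
			Rootfs:        "/var/lib/docker/containers/example/rootfs", // illustrative path
			ProcessConfig: processConfig,
		}
		fmt.Println(command.ID, command.ProcessConfig.Entrypoint)
	}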
@@ -195,13 +195,13 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 	container.daemon = daemon
 
 	// Attach to stdout and stderr
-	container.stderr = broadcastwriter.New()
-	container.stdout = broadcastwriter.New()
+	container.StdConfig.stderr = broadcastwriter.New()
+	container.StdConfig.stdout = broadcastwriter.New()
 	// Attach to stdin
 	if container.Config.OpenStdin {
-		container.stdin, container.stdinPipe = io.Pipe()
+		container.StdConfig.stdin, container.StdConfig.stdinPipe = io.Pipe()
 	} else {
-		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
+		container.StdConfig.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
 	}
 	// done
 	daemon.containers.Add(container.ID, container)
@@ -229,7 +229,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 		ID: container.ID,
 	}
 	var err error
-	cmd.Process, err = os.FindProcess(existingPid)
+	cmd.ProcessConfig.Process, err = os.FindProcess(existingPid)
 	if err != nil {
 		log.Debugf("cannot find existing process for %d", existingPid)
 	}
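On daemon restart, register re-attaches to a container whose process is still alive by looking up the recorded pid; after the refactor the resulting *os.Process lands on cmd.ProcessConfig.Process. A standalone sketch of that lookup — note the zero-signal liveness probe is an addition for illustration, since on Unix os.FindProcess itself never fails, which is why the hunk above only logs the error:

	package main

	import (
		"fmt"
		"os"
		"syscall"
	)

	func main() {
		existingPid := os.Getpid() // stand-in for the pid recorded by the daemon

		proc, err := os.FindProcess(existingPid)
		if err != nil {
			// Mirrors the daemon's behaviour: log and carry on.
			fmt.Printf("cannot find existing process for %d\n", existingPid)
			return
		}

		// On Unix, FindProcess always succeeds; signal 0 actually probes
		// liveness (illustrative addition, not in the hunk above).
		if err := proc.Signal(syscall.Signal(0)); err != nil {
			fmt.Printf("process %d is gone: %v\n", existingPid, err)
			return
		}
		fmt.Printf("re-attached to running process %d\n", proc.Pid)
	}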
@@ -20,7 +20,7 @@ var (
 	ErrDriverNotFound = errors.New("The requested docker init has not been found")
 )
 
-type StartCallback func(*Command)
+type StartCallback func(*ProcessConfig)
 
 // Driver specific information based on
 // processes registered with the driver
@@ -80,20 +80,27 @@ type Mount struct {
 	Private bool `json:"private"`
 }
 
-// Process wrapps an os/exec.Cmd to add more metadata
-type Command struct {
+// Describes a process that will be run inside a container.
+type ProcessConfig struct {
 	exec.Cmd `json:"-"`
 
+	Privileged   bool     `json:"privileged"`
+	User         string   `json:"user"`
+	Tty          bool     `json:"tty"`
+	Entrypoint   string   `json:"entrypoint"`
+	Arguments    []string `json:"arguments"`
+	Terminal     Terminal `json:"-"`             // standard or tty terminal
+	ContainerPid int      `json:"container_pid"` // the pid for the process inside a container
+	Console      string   `json:"-"`             // dev/console path
+}
+
+// Process wrapps an os/exec.Cmd to add more metadata
+type Command struct {
 	ID         string   `json:"id"`
-	Privileged bool     `json:"privileged"`
-	User       string   `json:"user"`
 	Rootfs     string   `json:"rootfs"`   // root fs of the container
 	InitPath   string   `json:"initpath"` // dockerinit
-	Entrypoint string   `json:"entrypoint"`
-	Arguments  []string `json:"arguments"`
 	WorkingDir string   `json:"working_dir"`
 	ConfigPath string   `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver
-	Tty        bool     `json:"tty"`
 	Network    *Network            `json:"network"`
 	Config     map[string][]string `json:"config"` // generic values that specific drivers can consume
 	Resources  *Resources          `json:"resources"`
@@ -103,13 +110,11 @@ type Command struct {
 	CapAdd  []string `json:"cap_add"`
 	CapDrop []string `json:"cap_drop"`
 
-	Terminal     Terminal `json:"-"`             // standard or tty terminal
-	Console      string   `json:"-"`             // dev/console path
-	ContainerPid int      `json:"container_pid"` // the pid for the process inside a container
+	ProcessConfig ProcessConfig `json:"process_config"` // Describes the init process of the container.
 }
 
 // Return the pid of the process
 // If the process is nil -1 will be returned
-func (c *Command) Pid() int {
-	return c.ContainerPid
+func (processConfig *ProcessConfig) Pid() int {
+	return processConfig.ContainerPid
 }
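Two contract changes live in this file: StartCallback now receives the *ProcessConfig rather than the whole *Command, and Pid() moves to ProcessConfig. A toy driver loop against local stand-in types, showing how a driver starts the process, records the pid, and invokes the callback under the new signature (the -1 guard follows the method's doc comment rather than the one-line body above; /bin/true is an assumed path):

	package main

	import (
		"fmt"
		"os/exec"
	)

	type ProcessConfig struct {
		exec.Cmd
		ContainerPid int
	}

	// Pid loosely mirrors the refactored method, which moved from Command
	// to ProcessConfig; the -1 fallback is illustrative.
	func (processConfig *ProcessConfig) Pid() int {
		if processConfig.ContainerPid == 0 {
			return -1
		}
		return processConfig.ContainerPid
	}

	type Command struct {
		ID            string
		ProcessConfig ProcessConfig
	}

	// StartCallback matches the new signature: drivers hand back only the
	// process that just started, not the whole container command.
	type StartCallback func(*ProcessConfig)

	func run(c *Command, startCallback StartCallback) error {
		c.ProcessConfig.Path = "/bin/true" // assumed path, for illustration
		if err := c.ProcessConfig.Start(); err != nil {
			return err
		}
		c.ProcessConfig.ContainerPid = c.ProcessConfig.Process.Pid
		if startCallback != nil {
			startCallback(&c.ProcessConfig)
		}
		return c.ProcessConfig.Wait()
	}

	func main() {
		cmd := &Command{ID: "example"}
		err := run(cmd, func(p *ProcessConfig) {
			fmt.Println("started, pid:", p.Pid())
		})
		fmt.Println("exit err:", err)
	}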
@@ -59,12 +59,12 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		err error
 	)
 
-	if c.Tty {
-		term, err = NewTtyConsole(c, pipes)
+	if c.ProcessConfig.Tty {
+		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
 	} else {
-		term, err = execdriver.NewStdConsole(c, pipes)
+		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
 	}
-	c.Terminal = term
+	c.ProcessConfig.Terminal = term
 
 	c.Mounts = append(c.Mounts, execdriver.Mount{
 		Source: d.initPath,
@@ -98,11 +98,11 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		"-mtu", strconv.Itoa(c.Network.Mtu),
 	)
 
-	if c.User != "" {
-		params = append(params, "-u", c.User)
+	if c.ProcessConfig.User != "" {
+		params = append(params, "-u", c.ProcessConfig.User)
 	}
 
-	if c.Privileged {
+	if c.ProcessConfig.Privileged {
 		if d.apparmor {
 			params[0] = path.Join(d.root, "lxc-start-unconfined")
@@ -122,8 +122,8 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		params = append(params, fmt.Sprintf("-cap-drop=%s", strings.Join(c.CapDrop, ":")))
 	}
 
-	params = append(params, "--", c.Entrypoint)
-	params = append(params, c.Arguments...)
+	params = append(params, "--", c.ProcessConfig.Entrypoint)
+	params = append(params, c.ProcessConfig.Arguments...)
 
 	if d.sharedRoot {
 		// lxc-start really needs / to be non-shared, or all kinds of stuff break
@@ -149,14 +149,14 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	if err != nil {
 		aname = name
 	}
-	c.Path = aname
-	c.Args = append([]string{name}, arg...)
+	c.ProcessConfig.Path = aname
+	c.ProcessConfig.Args = append([]string{name}, arg...)
 
 	if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
 		return -1, err
 	}
 
-	if err := c.Start(); err != nil {
+	if err := c.ProcessConfig.Start(); err != nil {
 		return -1, err
 	}
@@ -166,7 +166,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	)
 
 	go func() {
-		if err := c.Wait(); err != nil {
+		if err := c.ProcessConfig.Wait(); err != nil {
 			if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0
 				waitErr = err
 			}
@@ -177,17 +177,17 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 	// Poll lxc for RUNNING status
 	pid, err := d.waitForStart(c, waitLock)
 	if err != nil {
-		if c.Process != nil {
-			c.Process.Kill()
-			c.Wait()
+		if c.ProcessConfig.Process != nil {
+			c.ProcessConfig.Process.Kill()
+			c.ProcessConfig.Wait()
 		}
 		return -1, err
 	}
 
-	c.ContainerPid = pid
+	c.ProcessConfig.ContainerPid = pid
 
 	if startCallback != nil {
-		startCallback(c)
+		startCallback(&c.ProcessConfig)
 	}
 
 	<-waitLock
@@ -198,10 +198,10 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 /// Return the exit code of the process
 // if the process has not exited -1 will be returned
 func getExitCode(c *execdriver.Command) int {
-	if c.ProcessState == nil {
+	if c.ProcessConfig.ProcessState == nil {
 		return -1
 	}
-	return c.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+	return c.ProcessConfig.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
 }
 
 func (d *driver) Kill(c *execdriver.Command, sig int) error {
@@ -442,7 +442,7 @@ func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) {
 }
 
 func (d *driver) generateEnvConfig(c *execdriver.Command) error {
-	data, err := json.Marshal(c.Env)
+	data, err := json.Marshal(c.ProcessConfig.Env)
 	if err != nil {
 		return err
 	}
@@ -462,7 +462,7 @@ type TtyConsole struct {
 	SlavePty *os.File
 }
 
-func NewTtyConsole(command *execdriver.Command, pipes *execdriver.Pipes) (*TtyConsole, error) {
+func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes) (*TtyConsole, error) {
 	// lxc is special in that we cannot create the master outside of the container without
 	// opening the slave because we have nothing to provide to the cmd. We have to open both then do
 	// the crazy setup on command right now instead of passing the console path to lxc and telling it
@@ -478,12 +478,12 @@ func NewTtyConsole(command *execdriver.Command, pipes *execdriver.Pipes) (*TtyCo
 		SlavePty: ptySlave,
 	}
 
-	if err := tty.AttachPipes(&command.Cmd, pipes); err != nil {
+	if err := tty.AttachPipes(&processConfig.Cmd, pipes); err != nil {
 		tty.Close()
 		return nil, err
 	}
 
-	command.Console = tty.SlavePty.Name()
+	processConfig.Console = tty.SlavePty.Name()
 
 	return tty, nil
 }
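Everywhere the lxc driver read process attributes straight off the Command — c.User, c.Privileged, c.Entrypoint, c.Arguments — it now goes through c.ProcessConfig, while container-level fields like c.Rootfs and c.Network stay put. A compact sketch of the lxc-start argv assembly under the new split (local stand-in types; the flag names come from the hunks above, and the rest of the real parameter list is omitted):

	package main

	import (
		"fmt"
		"strconv"
	)

	type ProcessConfig struct {
		User       string
		Privileged bool
		Entrypoint string
		Arguments  []string
	}

	type Command struct {
		ID            string
		Network       struct{ Mtu int }
		ProcessConfig ProcessConfig
	}

	func lxcParams(c *Command) []string {
		params := []string{"lxc-start", "-n", c.ID,
			"-mtu", strconv.Itoa(c.Network.Mtu)}
		// Process-level flags now come from c.ProcessConfig, not c itself.
		if c.ProcessConfig.User != "" {
			params = append(params, "-u", c.ProcessConfig.User)
		}
		params = append(params, "--", c.ProcessConfig.Entrypoint)
		params = append(params, c.ProcessConfig.Arguments...)
		return params
	}

	func main() {
		c := &Command{ID: "example", ProcessConfig: ProcessConfig{
			User: "root", Entrypoint: "/bin/echo", Arguments: []string{"hi"},
		}}
		c.Network.Mtu = 1500
		fmt.Println(lxcParams(c))
	}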
@@ -42,7 +42,7 @@ lxc.se_context = {{ .ProcessLabel}}
 # no controlling tty at all
 lxc.tty = 1
 
-{{if .Privileged}}
+{{if .ProcessConfig.Privileged}}
 lxc.cgroup.devices.allow = a
 {{else}}
 # no implicit access to devices
@@ -66,7 +66,7 @@ lxc.pivotdir = lxc_putold
 lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0
 lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0
 
-{{if .Tty}}
+{{if .ProcessConfig.Tty}}
 lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0
 {{end}}
@@ -81,7 +81,7 @@ lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabS
 {{end}}
 {{end}}
 
-{{if .Privileged}}
+{{if .ProcessConfig.Privileged}}
 {{if .AppArmor}}
 lxc.aa_profile = unconfined
 {{else}}
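The lxc template still executes against the whole Command, so every field that moved has to grow a .ProcessConfig prefix in the template source too. A runnable illustration of that dotted-path lookup in text/template, using a trimmed-down stand-in for the real template:

	package main

	import (
		"os"
		"text/template"
	)

	type ProcessConfig struct {
		Privileged bool
		Tty        bool
	}

	type Command struct {
		ProcessConfig ProcessConfig
	}

	// A few lines in the spirit of the lxc template: the data root is the
	// Command, so process fields need the .ProcessConfig prefix.
	const lxcTemplate = `lxc.tty = 1
	{{if .ProcessConfig.Privileged}}lxc.cgroup.devices.allow = a
	{{else}}# no implicit access to devices
	{{end}}`

	func main() {
		t := template.Must(template.New("lxc").Parse(lxcTemplate))
		data := &Command{ProcessConfig: ProcessConfig{Privileged: true}}
		if err := t.Execute(os.Stdout, data); err != nil {
			panic(err)
		}
	}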
@@ -77,9 +77,11 @@ func TestCustomLxcConfig(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	command := &execdriver.Command{
-		ID: "1",
+	processConfig := execdriver.ProcessConfig{
+		Privileged: false,
+	}
+	command := &execdriver.Command{
+		ID: "1",
 		Config: map[string][]string{
 			"lxc": {
 				"lxc.utsname = docker",
@@ -90,6 +92,7 @@ func TestCustomLxcConfig(t *testing.T) {
 			Mtu:       1500,
 			Interface: nil,
 		},
+		ProcessConfig: processConfig,
 	}
 
 	p, err := driver.generateLXCConfig(command)
@@ -23,11 +23,11 @@ import (
 func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, error) {
 	container := template.New()
 
-	container.Hostname = getEnv("HOSTNAME", c.Env)
-	container.Tty = c.Tty
-	container.User = c.User
+	container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env)
+	container.Tty = c.ProcessConfig.Tty
+	container.User = c.ProcessConfig.User
 	container.WorkingDir = c.WorkingDir
-	container.Env = c.Env
+	container.Env = c.ProcessConfig.Env
 	container.Cgroups.Name = c.ID
 	container.Cgroups.AllowedDevices = c.AllowedDevices
 	container.MountConfig.DeviceNodes = c.AutoCreatedDevices
@@ -40,7 +40,7 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e
 		return nil, err
 	}
 
-	if c.Privileged {
+	if c.ProcessConfig.Privileged {
 		if err := d.setPrivileged(container); err != nil {
 			return nil, err
 		}
@@ -68,26 +68,26 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 
 	var term execdriver.Terminal
 
-	if c.Tty {
-		term, err = NewTtyConsole(c, pipes)
+	if c.ProcessConfig.Tty {
+		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
 	} else {
-		term, err = execdriver.NewStdConsole(c, pipes)
+		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
 	}
 	if err != nil {
 		return -1, err
 	}
-	c.Terminal = term
+	c.ProcessConfig.Terminal = term
 
 	d.Lock()
 	d.activeContainers[c.ID] = &activeContainer{
 		container: container,
-		cmd:       &c.Cmd,
+		cmd:       &c.ProcessConfig.Cmd,
 	}
 	d.Unlock()
 
 	var (
 		dataPath = filepath.Join(d.root, c.ID)
-		args     = append([]string{c.Entrypoint}, c.Arguments...)
+		args     = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
 	)
 
 	if err := d.createContainerRoot(c.ID); err != nil {
@@ -99,9 +99,9 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		return -1, err
 	}
 
-	return namespaces.Exec(container, c.Stdin, c.Stdout, c.Stderr, c.Console, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
-		c.Path = d.initPath
-		c.Args = append([]string{
+	return namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
+		c.ProcessConfig.Path = d.initPath
+		c.ProcessConfig.Args = append([]string{
 			DriverName,
 			"-console", console,
 			"-pipe", "3",
@@ -110,25 +110,25 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		}, args...)
 
 		// set this to nil so that when we set the clone flags anything else is reset
-		c.SysProcAttr = &syscall.SysProcAttr{
+		c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
 			Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
 		}
-		c.ExtraFiles = []*os.File{child}
+		c.ProcessConfig.ExtraFiles = []*os.File{child}
 
-		c.Env = container.Env
-		c.Dir = c.Rootfs
+		c.ProcessConfig.Env = container.Env
+		c.ProcessConfig.Dir = c.Rootfs
 
-		return &c.Cmd
+		return &c.ProcessConfig.Cmd
 	}, func() {
 		if startCallback != nil {
-			c.ContainerPid = c.Process.Pid
-			startCallback(c)
+			c.ProcessConfig.ContainerPid = c.ProcessConfig.Process.Pid
+			startCallback(&c.ProcessConfig)
 		}
 	})
 }
 
 func (d *driver) Kill(p *execdriver.Command, sig int) error {
-	return syscall.Kill(p.Process.Pid, syscall.Signal(sig))
+	return syscall.Kill(p.ProcessConfig.Process.Pid, syscall.Signal(sig))
 }
 
 func (d *driver) Pause(c *execdriver.Command) error {
@@ -176,14 +176,14 @@ func (d *driver) Terminate(p *execdriver.Command) error {
 		state = &libcontainer.State{InitStartTime: string(data)}
 	}
 
-	currentStartTime, err := system.GetProcessStartTime(p.Process.Pid)
+	currentStartTime, err := system.GetProcessStartTime(p.ProcessConfig.Process.Pid)
 	if err != nil {
 		return err
 	}
 
 	if state.InitStartTime == currentStartTime {
-		err = syscall.Kill(p.Process.Pid, 9)
-		syscall.Wait4(p.Process.Pid, nil, 0, nil)
+		err = syscall.Kill(p.ProcessConfig.Process.Pid, 9)
+		syscall.Wait4(p.ProcessConfig.Process.Pid, nil, 0, nil)
 	}
 	d.removeContainerRoot(p.ID)
@@ -252,7 +252,7 @@ type TtyConsole struct {
 	MasterPty *os.File
 }
 
-func NewTtyConsole(command *execdriver.Command, pipes *execdriver.Pipes) (*TtyConsole, error) {
+func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes) (*TtyConsole, error) {
 	ptyMaster, console, err := consolepkg.CreateMasterAndConsole()
 	if err != nil {
 		return nil, err
@@ -262,12 +262,12 @@ func NewTtyConsole(command *execdriver.Command, pipes *execdriver.Pipes) (*TtyCo
 		MasterPty: ptyMaster,
 	}
 
-	if err := tty.AttachPipes(&command.Cmd, pipes); err != nil {
+	if err := tty.AttachPipes(&processConfig.Cmd, pipes); err != nil {
 		tty.Close()
 		return nil, err
 	}
 
-	command.Console = console
+	processConfig.Console = console
 
 	return tty, nil
 }
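In the native driver every touch of the wrapped exec.Cmd — Path, Args, SysProcAttr, and ExtraFiles before start; Process.Pid after — now goes through c.ProcessConfig, and the signal paths follow suit, as in Kill above. A reduced, runnable sketch of that pattern (stand-in types; /bin/sleep is an assumed path):

	package main

	import (
		"fmt"
		"os/exec"
		"syscall"
	)

	type ProcessConfig struct {
		exec.Cmd
	}

	type Command struct {
		ID            string
		ProcessConfig ProcessConfig
	}

	// kill mirrors the refactored driver method: the *os.Process now hangs
	// off ProcessConfig's embedded exec.Cmd.
	func kill(p *Command, sig int) error {
		return syscall.Kill(p.ProcessConfig.Process.Pid, syscall.Signal(sig))
	}

	func main() {
		c := &Command{ID: "example"}
		c.ProcessConfig.Path = "/bin/sleep" // assumed path, for illustration
		c.ProcessConfig.Args = []string{"sleep", "60"}
		if err := c.ProcessConfig.Start(); err != nil {
			panic(err)
		}
		if err := kill(c, int(syscall.SIGTERM)); err != nil {
			panic(err)
		}
		err := c.ProcessConfig.Wait()
		fmt.Println("process ended:", err)
	}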
@@ -8,10 +8,10 @@ import (
 type StdConsole struct {
 }
 
-func NewStdConsole(command *Command, pipes *Pipes) (*StdConsole, error) {
+func NewStdConsole(processConfig *ProcessConfig, pipes *Pipes) (*StdConsole, error) {
 	std := &StdConsole{}
 
-	if err := std.AttachPipes(&command.Cmd, pipes); err != nil {
+	if err := std.AttachPipes(&processConfig.Cmd, pipes); err != nil {
 		return nil, err
 	}
 	return std, nil
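NewStdConsole now takes the *ProcessConfig and attaches the driver's pipe set to its embedded Cmd. The essence of the non-tty case is plain stdio wiring; a reduced sketch with the Pipes struct simplified to its three streams (an assumption worth flagging: the real AttachPipes handles stdin more carefully, while everything here is assigned directly for brevity):

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"os/exec"
		"strings"
	)

	// Pipes is reduced to its three streams for this sketch.
	type Pipes struct {
		Stdin          io.Reader
		Stdout, Stderr io.Writer
	}

	// attachPipes wires the pipe set straight into the command's stdio,
	// roughly what a std (non-tty) console needs to do.
	func attachPipes(cmd *exec.Cmd, pipes *Pipes) error {
		cmd.Stdin = pipes.Stdin
		cmd.Stdout = pipes.Stdout
		cmd.Stderr = pipes.Stderr
		return nil
	}

	func main() {
		var out bytes.Buffer
		pipes := &Pipes{Stdin: strings.NewReader("hello\n"), Stdout: &out, Stderr: &out}

		cmd := exec.Command("cat") // assumes a Unix-like environment
		if err := attachPipes(cmd, pipes); err != nil {
			panic(err)
		}
		if err := cmd.Run(); err != nil {
			panic(err)
		}
		fmt.Print(out.String()) // echoes what went in on stdin
	}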
@@ -128,7 +128,7 @@ func (m *containerMonitor) Start() error {
 		return err
 	}
 
-	pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)
+	pipes := execdriver.NewPipes(m.container.StdConfig.stdin, m.container.StdConfig.stdout, m.container.StdConfig.stderr, m.container.Config.OpenStdin)
 
 	m.container.LogEvent("start")
@@ -233,17 +233,17 @@ func (m *containerMonitor) shouldRestart(exitStatus int) bool {
 
 // callback ensures that the container's state is properly updated after we
 // received ack from the execution drivers
-func (m *containerMonitor) callback(command *execdriver.Command) {
-	if command.Tty {
+func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig) {
+	if processConfig.Tty {
 		// The callback is called after the process Start()
 		// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlace
 		// which we close here.
-		if c, ok := command.Stdout.(io.Closer); ok {
+		if c, ok := processConfig.Stdout.(io.Closer); ok {
 			c.Close()
 		}
 	}
 
-	m.container.State.setRunning(command.Pid())
+	m.container.State.setRunning(processConfig.Pid())
 
 	// signal that the process has started
 	// close channel only if not closed
@@ -269,33 +269,33 @@ func (m *containerMonitor) resetContainer(lock bool) {
 	}
 
 	if container.Config.OpenStdin {
-		if err := container.stdin.Close(); err != nil {
+		if err := container.StdConfig.stdin.Close(); err != nil {
 			log.Errorf("%s: Error close stdin: %s", container.ID, err)
 		}
 	}
 
-	if err := container.stdout.Clean(); err != nil {
+	if err := container.StdConfig.stdout.Clean(); err != nil {
 		log.Errorf("%s: Error close stdout: %s", container.ID, err)
 	}
 
-	if err := container.stderr.Clean(); err != nil {
+	if err := container.StdConfig.stderr.Clean(); err != nil {
 		log.Errorf("%s: Error close stderr: %s", container.ID, err)
 	}
 
-	if container.command != nil && container.command.Terminal != nil {
-		if err := container.command.Terminal.Close(); err != nil {
+	if container.command != nil && container.command.ProcessConfig.Terminal != nil {
+		if err := container.command.ProcessConfig.Terminal.Close(); err != nil {
 			log.Errorf("%s: Error closing terminal: %s", container.ID, err)
 		}
 	}
 
 	// Re-create a brand new stdin pipe once the container exited
 	if container.Config.OpenStdin {
-		container.stdin, container.stdinPipe = io.Pipe()
+		container.StdConfig.stdin, container.StdConfig.stdinPipe = io.Pipe()
 	}
 
-	c := container.command.Cmd
+	c := container.command.ProcessConfig.Cmd
 
-	container.command.Cmd = exec.Cmd{
+	container.command.ProcessConfig.Cmd = exec.Cmd{
 		Stdin:  c.Stdin,
 		Stdout: c.Stdout,
 		Stderr: c.Stderr,
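resetContainer rebuilds the embedded exec.Cmd between restarts because an os/exec.Cmd is single-use: once Wait has run, it cannot be started again. The refactor only changes where that Cmd lives (command.ProcessConfig.Cmd). A self-contained demonstration of why the fresh struct is needed (assumes /bin/true exists):

	package main

	import (
		"fmt"
		"os/exec"
	)

	func main() {
		cmd := exec.Cmd{Path: "/bin/true"}
		if err := cmd.Run(); err != nil {
			panic(err)
		}

		// Running the same Cmd again fails: exec.Cmd is single-use.
		if err := cmd.Run(); err != nil {
			fmt.Println("reuse fails:", err)
		}

		// The monitor's fix, post-refactor: make a brand-new Cmd that keeps
		// only the stdio wiring from the old one.
		fresh := exec.Cmd{
			Path:   cmd.Path,
			Stdin:  cmd.Stdin,
			Stdout: cmd.Stdout,
			Stderr: cmd.Stderr,
		}
		fmt.Println("fresh run err:", fresh.Run())
	}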
@@ -467,7 +467,7 @@ func TestAttachDisconnect(t *testing.T) {
 	}
 
 	// Try to avoid the timeout in destroy. Best effort, don't check error
-	cStdin, _ := container.StdinPipe()
+	cStdin, _ := container.StdConfig.StdinPipe()
 	cStdin.Close()
 	container.State.WaitStop(-1 * time.Second)
 }
@@ -25,11 +25,11 @@ func TestRestartStdin(t *testing.T) {
 	}
 	defer daemon.Destroy(container)
 
-	stdin, err := container.StdinPipe()
+	stdin, err := container.StdConfig.StdinPipe()
 	if err != nil {
 		t.Fatal(err)
 	}
-	stdout, err := container.StdoutPipe()
+	stdout, err := container.StdConfig.StdoutPipe()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -55,11 +55,11 @@ func TestRestartStdin(t *testing.T) {
 	}
 
 	// Restart and try again
-	stdin, err = container.StdinPipe()
+	stdin, err = container.StdConfig.StdinPipe()
 	if err != nil {
 		t.Fatal(err)
 	}
-	stdout, err = container.StdoutPipe()
+	stdout, err = container.StdConfig.StdoutPipe()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -101,11 +101,11 @@ func TestStdin(t *testing.T) {
 	}
 	defer daemon.Destroy(container)
 
-	stdin, err := container.StdinPipe()
+	stdin, err := container.StdConfig.StdinPipe()
 	if err != nil {
 		t.Fatal(err)
 	}
-	stdout, err := container.StdoutPipe()
+	stdout, err := container.StdConfig.StdoutPipe()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -146,11 +146,11 @@ func TestTty(t *testing.T) {
 	}
 	defer daemon.Destroy(container)
 
-	stdin, err := container.StdinPipe()
+	stdin, err := container.StdConfig.StdinPipe()
 	if err != nil {
 		t.Fatal(err)
 	}
-	stdout, err := container.StdoutPipe()
+	stdout, err := container.StdConfig.StdoutPipe()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -611,7 +611,7 @@ func TestRestore(t *testing.T) {
 	}
 
 	// Simulate a crash/manual quit of dockerd: process dies, states stays 'Running'
-	cStdin, _ := container2.StdinPipe()
+	cStdin, _ := container2.StdConfig.StdinPipe()
 	cStdin.Close()
 	if _, err := container2.State.WaitStop(2 * time.Second); err != nil {
 		t.Fatal(err)
@@ -84,11 +84,11 @@ func containerFileExists(eng *engine.Engine, id, dir string, t log.Fataler) bool
 
 func containerAttach(eng *engine.Engine, id string, t log.Fataler) (io.WriteCloser, io.ReadCloser) {
 	c := getContainer(eng, id, t)
-	i, err := c.StdinPipe()
+	i, err := c.StdConfig.StdinPipe()
 	if err != nil {
 		t.Fatal(err)
 	}
-	o, err := c.StdoutPipe()
+	o, err := c.StdConfig.StdoutPipe()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -289,7 +289,7 @@ func runContainer(eng *engine.Engine, r *daemon.Daemon, args []string, t *testin
 		return "", err
 	}
 	defer r.Destroy(container)
-	stdout, err := container.StdoutPipe()
+	stdout, err := container.StdConfig.StdoutPipe()
 	if err != nil {
 		return "", err
 	}