Merge pull request #17662 from calavera/decouple_container_and_daemon

Remove daemon field from within container.
Jess Frazelle 2015-11-04 10:21:58 -08:00
commit 55711a22e0
42 changed files with 1008 additions and 921 deletions
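
The whole change follows one mechanical pattern: methods that hung off *Container and reached through its daemon back-pointer become *Daemon methods that take the container as an explicit argument. A minimal before/after sketch of the shape (condensed from the pause hunks below, not a verbatim excerpt):

// Before: the container carries a back-pointer to its daemon.
func (container *Container) pause() error {
	return container.daemon.execDriver.Pause(container.command)
}

// After: the daemon acts on a container passed in explicitly, so the
// daemon field (and the coupling it created) can be deleted.
func (daemon *Daemon) containerPause(container *Container) error {
	return daemon.execDriver.Pause(container.command)
}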

View File

@ -128,6 +128,14 @@ type Docker interface {
// Release releases a list of images that were retained for the time of a build.
// TODO: remove
Release(sessionID string, activeImages []string)
// Kill stops the container execution abruptly.
Kill(c *daemon.Container) error
// Mount mounts the root filesystem for the container.
Mount(c *daemon.Container) error
// Unmount unmounts the root filesystem for the container.
Unmount(c *daemon.Container) error
// Start starts a new container.
Start(c *daemon.Container) error
}
// ImageCache abstracts an image cache store.
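
These four methods exist because the builder can no longer reach the daemon through a container; every lifecycle operation now goes through this interface. Any backend satisfying it will do. For instance, a hypothetical test double stubbing just the new methods (the real interface has more methods, elided here):

// fakeDocker is a hypothetical stub of the builder's Docker backend;
// only the lifecycle methods added in this PR are shown.
type fakeDocker struct{}

func (fakeDocker) Kill(c *daemon.Container) error    { return nil }
func (fakeDocker) Mount(c *daemon.Container) error   { return nil }
func (fakeDocker) Unmount(c *daemon.Container) error { return nil }
func (fakeDocker) Start(c *daemon.Container) error   { return nil }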

View File

@ -399,8 +399,8 @@ func run(b *Builder, args []string, attributes map[string]bool, original string)
// Ensure that we keep the container mounted until the commit
// to avoid unmounting and then mounting directly again
c.Mount()
defer c.Unmount()
b.docker.Mount(c)
defer b.docker.Unmount(c)
err = b.run(c)
if err != nil {

View File

@ -67,10 +67,10 @@ func (b *Builder) commit(id string, autoCmd *stringutils.StrSlice, comment strin
}
id = container.ID
if err := container.Mount(); err != nil {
if err := b.docker.Mount(container); err != nil {
return err
}
defer container.Unmount()
defer b.docker.Unmount(container)
}
container, err := b.docker.Container(id)
@ -201,7 +201,7 @@ func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalD
if err != nil {
return err
}
defer container.Unmount()
defer b.docker.Unmount(container)
b.tmpContainers[container.ID] = struct{}{}
comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)
@ -524,7 +524,7 @@ func (b *Builder) create() (*daemon.Container, error) {
if err != nil {
return nil, err
}
defer c.Unmount()
defer b.docker.Unmount(c)
for _, warning := range warnings {
fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning)
}
@ -549,7 +549,7 @@ func (b *Builder) run(c *daemon.Container) error {
}
//start the container
if err := c.Start(); err != nil {
if err := b.docker.Start(c); err != nil {
return err
}
@ -559,7 +559,7 @@ func (b *Builder) run(c *daemon.Container) error {
select {
case <-b.cancelled:
logrus.Debugln("Build cancelled, killing and removing container:", c.ID)
c.Kill()
b.docker.Kill(c)
b.removeContainer(c.ID)
case <-finished:
}

View File

@ -30,7 +30,7 @@ func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, err
res = res[1:]
}
return container.copy(res)
return daemon.containerCopy(container, res)
}
// ContainerStatPath stats the filesystem resource at the specified path in the
@ -41,7 +41,7 @@ func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.C
return nil, err
}
return container.StatPath(path)
return daemon.containerStatPath(container, path)
}
// ContainerArchivePath creates an archive of the filesystem resource at the
@ -53,7 +53,7 @@ func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io
return nil, nil, err
}
return container.ArchivePath(path)
return daemon.containerArchivePath(container, path)
}
// ContainerExtractToDir extracts the given archive to the specified location
@ -68,7 +68,7 @@ func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNon
return err
}
return container.ExtractToDir(path, noOverwriteDirNonDir, content)
return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content)
}
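
All four public wrappers in this file now share one shape: resolve the name to a *Container, then hand off to an unexported daemon method that does the real work. Sketched generically (containerExample is hypothetical; Get is the daemon's existing name/ID lookup):

func (daemon *Daemon) ContainerExample(name string) error {
	container, err := daemon.Get(name) // resolve name or ID prefix
	if err != nil {
		return err
	}
	return daemon.containerExample(container)
}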
// resolvePath resolves the given path in the container to a resource on the
@ -131,18 +131,18 @@ func (container *Container) statPath(resolvedPath, absPath string) (stat *types.
}, nil
}
// StatPath stats the filesystem resource at the specified path in this
// containerStatPath stats the filesystem resource at the specified path in this
// container. Returns stat info about the resource.
func (container *Container) StatPath(path string) (stat *types.ContainerPathStat, err error) {
func (daemon *Daemon) containerStatPath(container *Container, path string) (stat *types.ContainerPathStat, err error) {
container.Lock()
defer container.Unlock()
if err = container.Mount(); err != nil {
if err = daemon.Mount(container); err != nil {
return nil, err
}
defer container.Unmount()
defer daemon.Unmount(container)
err = container.mountVolumes()
err = daemon.mountVolumes(container)
defer container.unmountVolumes(true)
if err != nil {
return nil, err
@ -156,10 +156,10 @@ func (container *Container) StatPath(path string) (stat *types.ContainerPathStat
return container.statPath(resolvedPath, absPath)
}
// ArchivePath creates an archive of the filesystem resource at the specified
// containerArchivePath creates an archive of the filesystem resource at the specified
// path in this container. Returns a tar archive of the resource and stat info
// about the resource.
func (container *Container) ArchivePath(path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
func (daemon *Daemon) containerArchivePath(container *Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
container.Lock()
defer func() {
@ -171,7 +171,7 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
}
}()
if err = container.Mount(); err != nil {
if err = daemon.Mount(container); err != nil {
return nil, nil, err
}
@ -180,11 +180,11 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
// unmount any volumes
container.unmountVolumes(true)
// unmount the container's rootfs
container.Unmount()
daemon.Unmount(container)
}
}()
if err = container.mountVolumes(); err != nil {
if err = daemon.mountVolumes(container); err != nil {
return nil, nil, err
}
@ -214,32 +214,32 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
content = ioutils.NewReadCloserWrapper(data, func() error {
err := data.Close()
container.unmountVolumes(true)
container.Unmount()
daemon.Unmount(container)
container.Unlock()
return err
})
container.logEvent("archive-path")
daemon.LogContainerEvent(container, "archive-path")
return content, stat, nil
}
// ExtractToDir extracts the given tar archive to the specified location in the
// containerExtractToDir extracts the given tar archive to the specified location in the
// filesystem of this container. The given path must be of a directory in the
// container. If it is not, the error will be ErrExtractPointNotDirectory. If
// noOverwriteDirNonDir is true then it will be an error if unpacking the
// given content would cause an existing directory to be replaced with a non-
// directory and vice versa.
func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
func (daemon *Daemon) containerExtractToDir(container *Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
container.Lock()
defer container.Unlock()
if err = container.Mount(); err != nil {
if err = daemon.Mount(container); err != nil {
return err
}
defer container.Unmount()
defer daemon.Unmount(container)
err = container.mountVolumes()
err = daemon.mountVolumes(container)
defer container.unmountVolumes(true)
if err != nil {
return err
@ -318,7 +318,72 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
return err
}
container.logEvent("extract-to-dir")
daemon.LogContainerEvent(container, "extract-to-dir")
return nil
}
func (daemon *Daemon) containerCopy(container *Container, resource string) (rc io.ReadCloser, err error) {
container.Lock()
defer func() {
if err != nil {
// Wait to unlock the container until the archive is fully read
// (see the ReadCloseWrapper func below) or if there is an error
// before that occurs.
container.Unlock()
}
}()
if err := daemon.Mount(container); err != nil {
return nil, err
}
defer func() {
if err != nil {
// unmount any volumes
container.unmountVolumes(true)
// unmount the container's rootfs
daemon.Unmount(container)
}
}()
if err := daemon.mountVolumes(container); err != nil {
return nil, err
}
basePath, err := container.GetResourcePath(resource)
if err != nil {
return nil, err
}
stat, err := os.Stat(basePath)
if err != nil {
return nil, err
}
var filter []string
if !stat.IsDir() {
d, f := filepath.Split(basePath)
basePath = d
filter = []string{f}
} else {
filter = []string{filepath.Base(basePath)}
basePath = filepath.Dir(basePath)
}
archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
Compression: archive.Uncompressed,
IncludeFiles: filter,
})
if err != nil {
return nil, err
}
reader := ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
container.unmountVolumes(true)
daemon.Unmount(container)
container.Unlock()
return err
})
daemon.LogContainerEvent(container, "copy")
return reader, nil
}
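
The basePath/filter computation above deserves a worked example: a file resource archives as (parent dir, [file name]), while a directory archives as (its parent, [dir name]) so the tar stays rooted at the directory itself. A self-contained sketch with hypothetical paths:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// splitResource mirrors the filter logic in containerCopy above.
func splitResource(basePath string) (string, []string) {
	stat, err := os.Stat(basePath)
	if err != nil {
		return basePath, nil
	}
	if !stat.IsDir() {
		d, f := filepath.Split(basePath)
		return d, []string{f} // file: parent dir + file name
	}
	// directory: parent dir + dir name, so the tar is rooted at it
	return filepath.Dir(basePath), []string{filepath.Base(basePath)}
}

func main() {
	fmt.Println(splitResource("/etc/hostname")) // -> /etc/ [hostname]
	fmt.Println(splitResource("/etc"))          // -> / [etc]
}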

View File

@ -2,7 +2,10 @@ package daemon
import (
"io"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/pkg/stdcopy"
)
@ -43,7 +46,7 @@ func (daemon *Daemon) ContainerAttachWithLogs(prefixOrName string, c *ContainerA
stderr = errStream
}
return container.attachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream)
return daemon.attachWithLogs(container, stdin, stdout, stderr, c.Logs, c.Stream)
}
// ContainerWsAttachWithLogsConfig attach with websockets, since all
@ -60,5 +63,61 @@ func (daemon *Daemon) ContainerWsAttachWithLogs(prefixOrName string, c *Containe
if err != nil {
return err
}
return container.attachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
return daemon.attachWithLogs(container, c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
}
func (daemon *Daemon) attachWithLogs(container *Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
if logs {
logDriver, err := daemon.getLogger(container)
if err != nil {
return err
}
cLog, ok := logDriver.(logger.LogReader)
if !ok {
return logger.ErrReadLogsNotSupported
}
logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1})
LogLoop:
for {
select {
case msg, ok := <-logs.Msg:
if !ok {
break LogLoop
}
if msg.Source == "stdout" && stdout != nil {
stdout.Write(msg.Line)
}
if msg.Source == "stderr" && stderr != nil {
stderr.Write(msg.Line)
}
case err := <-logs.Err:
logrus.Errorf("Error streaming logs: %v", err)
break LogLoop
}
}
}
daemon.LogContainerEvent(container, "attach")
//stream
if stream {
var stdinPipe io.ReadCloser
if stdin != nil {
r, w := io.Pipe()
go func() {
defer w.Close()
defer logrus.Debugf("Closing buffered stdin pipe")
io.Copy(w, stdin)
}()
stdinPipe = r
}
<-container.Attach(stdinPipe, stdout, stderr)
// If we are in stdinonce mode, wait for the process to end
// otherwise, simply return
if container.Config.StdinOnce && !container.Config.Tty {
container.WaitStop(-1 * time.Second)
}
}
return nil
}
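
The LogLoop above is a reusable shape: drain the reader's message channel until it closes, bail out on the first error. The same select reduced to assumed channel types (Message is a stand-in here, not the daemon's logger types):

package logstream

import (
	"io"
	"log"
)

// Message is an assumed stand-in for a parsed log line.
type Message struct {
	Source string // "stdout" or "stderr"
	Line   []byte
}

// drainLogs mirrors LogLoop: stop when msgs closes or errs fires.
func drainLogs(msgs <-chan *Message, errs <-chan error, stdout, stderr io.Writer) {
	for {
		select {
		case msg, ok := <-msgs:
			if !ok {
				return // log stream exhausted
			}
			if msg.Source == "stdout" && stdout != nil {
				stdout.Write(msg.Line)
			}
			if msg.Source == "stderr" && stderr != nil {
				stderr.Write(msg.Line)
			}
		case err := <-errs:
			log.Printf("error streaming logs: %v", err)
			return
		}
	}
}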

View File

@ -9,5 +9,7 @@ func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
return nil, err
}
return container.changes()
container.Lock()
defer container.Unlock()
return daemon.changes(container)
}

View File

@ -2,6 +2,8 @@ package daemon
import (
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/runconfig"
)
@ -20,11 +22,11 @@ type ContainerCommitConfig struct {
// The image can optionally be tagged into a repository.
func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*image.Image, error) {
if c.Pause && !container.isPaused() {
container.pause()
defer container.unpause()
daemon.containerPause(container)
defer daemon.containerUnpause(container)
}
rwTar, err := container.exportContainerRw()
rwTar, err := daemon.exportContainerRw(container)
if err != nil {
return nil, err
}
@ -46,6 +48,19 @@ func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*i
return img, err
}
}
container.logEvent("commit")
daemon.LogContainerEvent(container, "commit")
return img, nil
}
func (daemon *Daemon) exportContainerRw(container *Container) (archive.Archive, error) {
archive, err := daemon.diff(container)
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
return err
}),
nil
}
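
exportContainerRw here, and containerArchivePath and containerCopy earlier, all lean on the same trick: ioutils.NewReadCloserWrapper ties teardown (closing, unmounting, unlocking) to the moment the caller closes the stream, not to function return. A sketch of what that helper amounts to, using only the standard library (names assumed, behavior matching how it is used in this diff):

package ioutilsketch

import "io"

// readCloserWrapper runs closer when Close is called, so cleanup is
// deferred until the consumer has finished reading.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (r *readCloserWrapper) Close() error { return r.closer() }

// NewReadCloserWrapper mirrors the ioutils helper used in the diff.
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{Reader: r, closer: closer}
}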

View File

@ -7,7 +7,6 @@ import (
"io"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
"time"
@ -20,8 +19,6 @@ import (
"github.com/docker/docker/daemon/logger/jsonfilelog"
"github.com/docker/docker/daemon/network"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/broadcaster"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/ioutils"
@ -33,7 +30,6 @@ import (
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/volume"
"github.com/docker/docker/volume/store"
)
var (
@ -79,7 +75,6 @@ type CommonContainer struct {
command *execdriver.Command
monitor *containerMonitor
execCommands *execStore
daemon *Daemon
// logDriver for closing
logDriver logger.Logger
logCopier *logger.Copier
@ -176,15 +171,6 @@ func (container *Container) writeHostConfig() error {
return json.NewEncoder(f).Encode(&container.hostConfig)
}
func (container *Container) logEvent(action string) {
d := container.daemon
d.EventsService.Log(
action,
container.ID,
container.Config.Image,
)
}
// GetResourcePath evaluates `path` in the scope of the container's basefs, with proper path
// sanitisation. Symlinks are all scoped to the basefs of the container, as
// though the container's basefs was `/`.
@ -225,91 +211,6 @@ func (container *Container) getRootResourcePath(path string) (string, error) {
return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root)
}
func (container *Container) exportContainerRw() (archive.Archive, error) {
if container.daemon == nil {
return nil, derr.ErrorCodeUnregisteredContainer.WithArgs(container.ID)
}
archive, err := container.daemon.diff(container)
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
return err
}),
nil
}
// Start prepares the container to run by setting up everything the
// container needs, such as storage and networking, as well as links
// between containers. The container is left waiting for a signal to
// begin running.
func (container *Container) Start() (err error) {
container.Lock()
defer container.Unlock()
if container.Running {
return nil
}
if container.removalInProgress || container.Dead {
return derr.ErrorCodeContainerBeingRemoved
}
// if we encounter an error during start we need to ensure that any other
// setup has been cleaned up properly
defer func() {
if err != nil {
container.setError(err)
// if no one else has set it, make sure we don't leave it at zero
if container.ExitCode == 0 {
container.ExitCode = 128
}
container.toDisk()
container.cleanup()
container.logEvent("die")
}
}()
if err := container.conditionalMountOnStart(); err != nil {
return err
}
// Make sure NetworkMode has an acceptable value. We do this to ensure
// backwards API compatibility.
container.hostConfig = runconfig.SetDefaultNetModeIfBlank(container.hostConfig)
if err := container.initializeNetworking(); err != nil {
return err
}
linkedEnv, err := container.setupLinkedContainers()
if err != nil {
return err
}
if err := container.setupWorkingDirectory(); err != nil {
return err
}
env := container.createDaemonEnvironment(linkedEnv)
if err := populateCommand(container, env); err != nil {
return err
}
if !container.hostConfig.IpcMode.IsContainer() && !container.hostConfig.IpcMode.IsHost() {
if err := container.setupIpcDirs(); err != nil {
return err
}
}
mounts, err := container.setupMounts()
if err != nil {
return err
}
mounts = append(mounts, container.ipcMounts()...)
container.command.Mounts = mounts
return container.waitForStart()
}
// streamConfig.StdinPipe returns a WriteCloser which can be used to feed data
// to the standard input of the container's active process.
// Container.StdoutPipe and Container.StderrPipe each return a ReadCloser
@ -334,205 +235,10 @@ func (streamConfig *streamConfig) StderrPipe() io.ReadCloser {
return ioutils.NewBufReader(reader)
}
// cleanup releases any network resources allocated to the container along with any rules
// around how containers are linked together. It also unmounts the container's root filesystem.
func (container *Container) cleanup() {
container.releaseNetwork()
container.unmountIpcMounts(detachMounted)
container.conditionalUnmountOnCleanup()
for _, eConfig := range container.execCommands.s {
container.daemon.unregisterExecCommand(eConfig)
}
if err := container.unmountVolumes(false); err != nil {
logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
}
}
// killSig sends the container the given signal. This wrapper for the
// host-specific kill command prepares the container before attempting
// to send the signal. An error is returned if the container is paused
// or not running, or if there is a problem returned from the
// underlying kill command.
func (container *Container) killSig(sig int) error {
logrus.Debugf("Sending %d to %s", sig, container.ID)
container.Lock()
defer container.Unlock()
// We could unpause the container for them rather than returning this error
if container.Paused {
return derr.ErrorCodeUnpauseContainer.WithArgs(container.ID)
}
if !container.Running {
return derr.ErrorCodeNotRunning.WithArgs(container.ID)
}
// signal to the monitor that it should not restart the container
// after we send the kill signal
// ExitOnNext signals to the monitor that it should not restart the container
// after we send the kill signal.
func (container *Container) ExitOnNext() {
container.monitor.ExitOnNext()
// if the container is currently restarting we do not need to send the signal
// to the process. Telling the monitor that it should exit on its next event
// loop is enough
if container.Restarting {
return nil
}
if err := container.daemon.kill(container, sig); err != nil {
return err
}
container.logEvent("kill")
return nil
}
// Wrapper around killSig(), suppressing the "no such process" error.
func (container *Container) killPossiblyDeadProcess(sig int) error {
err := container.killSig(sig)
if err == syscall.ESRCH {
logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.getPID(), sig)
return nil
}
return err
}
func (container *Container) pause() error {
container.Lock()
defer container.Unlock()
// We cannot pause a container that is not running
if !container.Running {
return derr.ErrorCodeNotRunning.WithArgs(container.ID)
}
// We cannot pause a container that is already paused
if container.Paused {
return derr.ErrorCodeAlreadyPaused.WithArgs(container.ID)
}
if err := container.daemon.execDriver.Pause(container.command); err != nil {
return err
}
container.Paused = true
container.logEvent("pause")
return nil
}
func (container *Container) unpause() error {
container.Lock()
defer container.Unlock()
// We cannot unpause a container that is not running
if !container.Running {
return derr.ErrorCodeNotRunning.WithArgs(container.ID)
}
// We cannot unpause a container that is not paused
if !container.Paused {
return derr.ErrorCodeNotPaused.WithArgs(container.ID)
}
if err := container.daemon.execDriver.Unpause(container.command); err != nil {
return err
}
container.Paused = false
container.logEvent("unpause")
return nil
}
// Kill forcefully terminates a container.
func (container *Container) Kill() error {
if !container.IsRunning() {
return derr.ErrorCodeNotRunning.WithArgs(container.ID)
}
// 1. Send SIGKILL
if err := container.killPossiblyDeadProcess(int(syscall.SIGKILL)); err != nil {
// While normally we might "return err" here we're not going to
// because if we can't stop the container by this point then
// it's probably because it's already stopped. That is, between
// the time of the IsRunning() call above and now, it stopped.
// Also, since the err return will be exec-driver specific we can't
// look for any particular (common) error that would indicate
// that the process is already dead vs something else going wrong.
// So, instead we'll give it up to 2 more seconds to complete and if
// by that time the container is still running, then the error
// we got is probably valid and so we return it to the caller.
if container.IsRunning() {
container.WaitStop(2 * time.Second)
if container.IsRunning() {
return err
}
}
}
// 2. Wait for the process to die, in last resort, try to kill the process directly
if err := killProcessDirectly(container); err != nil {
return err
}
container.WaitStop(-1 * time.Second)
return nil
}
// Stop halts a container by sending a stop signal, waiting for the given
// duration in seconds, and then calling SIGKILL and waiting for the
// process to exit. If a negative duration is given, Stop will wait
// for the initial signal forever. If the container is not running Stop returns
// immediately.
func (container *Container) Stop(seconds int) error {
if !container.IsRunning() {
return nil
}
// 1. Send a SIGTERM
if err := container.killPossiblyDeadProcess(container.stopSignal()); err != nil {
logrus.Infof("Failed to send SIGTERM to the process, force killing")
if err := container.killPossiblyDeadProcess(9); err != nil {
return err
}
}
// 2. Wait for the process to exit on its own
if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil {
logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
// 3. If it doesn't, then send SIGKILL
if err := container.Kill(); err != nil {
container.WaitStop(-1 * time.Second)
logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it
}
}
container.logEvent("stop")
return nil
}
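
Stop's escalation is a pattern worth isolating: deliver the polite signal, give the process its grace period, then SIGKILL and wait forever. A condensed sketch with hypothetical func-valued stand-ins for the container methods used above:

package stopsketch

import "time"

// gracefulStop: term sends the stop signal, kill sends SIGKILL, and
// wait blocks up to d (forever when d < 0). All three are assumed
// stand-ins for killPossiblyDeadProcess/Kill/WaitStop.
func gracefulStop(term, kill func() error, wait func(d time.Duration) error, seconds int) error {
	if err := term(); err != nil {
		// Couldn't deliver the stop signal; fall back to SIGKILL.
		if err := kill(); err != nil {
			return err
		}
	}
	if err := wait(time.Duration(seconds) * time.Second); err != nil {
		// Grace period expired: force kill, then wait indefinitely,
		// swallowing the error since we only care that it stopped.
		if kerr := kill(); kerr != nil {
			wait(-1 * time.Second)
		}
	}
	return nil
}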
// Restart attempts to gracefully stop and then start the
// container. When stopping, wait for the given duration in seconds to
// gracefully stop, before forcefully terminating the container. If
// given a negative duration, wait forever for a graceful stop.
func (container *Container) Restart(seconds int) error {
// Avoid unnecessarily unmounting and then directly mounting
// the container when the container stops and then starts
// again
if err := container.Mount(); err == nil {
defer container.Unmount()
}
if err := container.Stop(seconds); err != nil {
return err
}
if err := container.Start(); err != nil {
return err
}
container.logEvent("restart")
return nil
}
// Resize changes the TTY of the process running inside the container
@ -544,58 +250,9 @@ func (container *Container) Resize(h, w int) error {
if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil {
return err
}
container.logEvent("resize")
return nil
}
func (container *Container) export() (archive.Archive, error) {
if err := container.Mount(); err != nil {
return nil, err
}
uidMaps, gidMaps := container.daemon.GetUIDGIDMaps()
archive, err := archive.TarWithOptions(container.basefs, &archive.TarOptions{
Compression: archive.Uncompressed,
UIDMaps: uidMaps,
GIDMaps: gidMaps,
})
if err != nil {
container.Unmount()
return nil, err
}
arch := ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
container.Unmount()
return err
})
container.logEvent("export")
return arch, err
}
// Mount sets container.basefs
func (container *Container) Mount() error {
return container.daemon.Mount(container)
}
func (container *Container) changes() ([]archive.Change, error) {
container.Lock()
defer container.Unlock()
return container.daemon.changes(container)
}
func (container *Container) getImage() (*image.Image, error) {
if container.daemon == nil {
return nil, derr.ErrorCodeImageUnregContainer
}
return container.daemon.graph.Get(container.ImageID)
}
// Unmount asks the daemon to release the layered filesystems that are
// mounted by the container.
func (container *Container) Unmount() error {
return container.daemon.unmount(container)
}
func (container *Container) hostConfigPath() (string, error) {
return container.getRootResourcePath("hostconfig.json")
}
@ -617,78 +274,13 @@ func validateID(id string) error {
return nil
}
func (container *Container) copy(resource string) (rc io.ReadCloser, err error) {
container.Lock()
defer func() {
if err != nil {
// Wait to unlock the container until the archive is fully read
// (see the ReadCloseWrapper func below) or if there is an error
// before that occurs.
container.Unlock()
}
}()
if err := container.Mount(); err != nil {
return nil, err
}
defer func() {
if err != nil {
// unmount any volumes
container.unmountVolumes(true)
// unmount the container's rootfs
container.Unmount()
}
}()
if err := container.mountVolumes(); err != nil {
return nil, err
}
basePath, err := container.GetResourcePath(resource)
if err != nil {
return nil, err
}
stat, err := os.Stat(basePath)
if err != nil {
return nil, err
}
var filter []string
if !stat.IsDir() {
d, f := filepath.Split(basePath)
basePath = d
filter = []string{f}
} else {
filter = []string{filepath.Base(basePath)}
basePath = filepath.Dir(basePath)
}
archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
Compression: archive.Uncompressed,
IncludeFiles: filter,
})
if err != nil {
return nil, err
}
reader := ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
container.unmountVolumes(true)
container.Unmount()
container.Unlock()
return err
})
container.logEvent("copy")
return reader, nil
}
// Returns true if the container exposes a certain port
func (container *Container) exposes(p nat.Port) bool {
_, exists := container.Config.ExposedPorts[p]
return exists
}
func (container *Container) getLogConfig() runconfig.LogConfig {
func (container *Container) getLogConfig(defaultConfig runconfig.LogConfig) runconfig.LogConfig {
cfg := container.hostConfig.LogConfig
if cfg.Type != "" || len(cfg.Config) > 0 { // container has log driver configured
if cfg.Type == "" {
@ -697,17 +289,11 @@ func (container *Container) getLogConfig() runconfig.LogConfig {
return cfg
}
// Use daemon's default log config for containers
return container.daemon.defaultLogConfig
return defaultConfig
}
func (container *Container) getLogger() (logger.Logger, error) {
if container.logDriver != nil && container.IsRunning() {
return container.logDriver, nil
}
cfg := container.getLogConfig()
if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil {
return nil, err
}
// StartLogger starts a new logger driver for the container.
func (container *Container) StartLogger(cfg runconfig.LogConfig) (logger.Logger, error) {
c, err := logger.GetLogDriver(cfg.Type)
if err != nil {
return nil, derr.ErrorCodeLoggingFactory.WithArgs(err)
@ -735,44 +321,6 @@ func (container *Container) getLogger() (logger.Logger, error) {
return c(ctx)
}
func (container *Container) startLogging() error {
cfg := container.getLogConfig()
if cfg.Type == "none" {
return nil // do not start logging routines
}
l, err := container.getLogger()
if err != nil {
return derr.ErrorCodeInitLogger.WithArgs(err)
}
copier := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l)
container.logCopier = copier
copier.Run()
container.logDriver = l
// set LogPath field only for json-file logdriver
if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok {
container.LogPath = jl.LogPath()
}
return nil
}
func (container *Container) waitForStart() error {
container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy)
// block until we either receive an error from the initial start of the container's
// process or until the process is running in the container
select {
case <-container.monitor.startSignal:
case err := <-promise.Go(container.monitor.Start):
return err
}
return nil
}
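
waitForStart's select is the promise.Go pattern in miniature: run the monitor in a goroutine and race its error against a "started" signal. The same thing with only the standard library (run and started are assumed stand-ins for monitor.Start and monitor.startSignal):

package monitorsketch

// startAndWait blocks until either started fires (the process is up)
// or run returns an error first.
func startAndWait(run func() error, started <-chan struct{}) error {
	errC := make(chan error, 1)
	go func() { errC <- run() }()
	select {
	case <-started:
		return nil
	case err := <-errC:
		return err
	}
}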
func (container *Container) getProcessLabel() string {
// even if we have a process label return "" if we are running
// in privileged mode
@ -789,140 +337,16 @@ func (container *Container) getMountLabel() string {
return container.MountLabel
}
func (container *Container) stats() (*execdriver.ResourceStats, error) {
return container.daemon.stats(container)
}
func (container *Container) getExecIDs() []string {
return container.execCommands.List()
}
func (container *Container) exec(ec *ExecConfig) error {
container.Lock()
defer container.Unlock()
callback := func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
if processConfig.Tty {
// The callback is called after the process Start()
// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
// which we close here.
if c, ok := processConfig.Stdout.(io.Closer); ok {
c.Close()
}
}
close(ec.waitStart)
return nil
}
// We use a callback here instead of a goroutine and a chan for
// synchronization purposes
cErr := promise.Go(func() error { return container.monitorExec(ec, callback) })
// Exec should not return until the process is actually running
select {
case <-ec.waitStart:
case err := <-cErr:
return err
}
return nil
}
func (container *Container) monitorExec(ExecConfig *ExecConfig, callback execdriver.DriverCallback) error {
var (
err error
exitCode int
)
pipes := execdriver.NewPipes(ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdout, ExecConfig.streamConfig.stderr, ExecConfig.OpenStdin)
exitCode, err = container.daemon.Exec(container, ExecConfig, pipes, callback)
if err != nil {
logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
}
logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
if ExecConfig.OpenStdin {
if err := ExecConfig.streamConfig.stdin.Close(); err != nil {
logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
}
}
if err := ExecConfig.streamConfig.stdout.Clean(); err != nil {
logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
}
if err := ExecConfig.streamConfig.stderr.Clean(); err != nil {
logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
}
if ExecConfig.ProcessConfig.Terminal != nil {
if err := ExecConfig.ProcessConfig.Terminal.Close(); err != nil {
logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
}
}
// remove the exec command from the container's store only and not the
// daemon's store so that the exec command can be inspected.
container.execCommands.Delete(ExecConfig.ID)
return err
}
// Attach connects to the container's TTY, delegating to standard
// streams or websockets depending on the configuration.
func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
return attach(&container.streamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr)
}
func (container *Container) attachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
if logs {
logDriver, err := container.getLogger()
if err != nil {
return err
}
cLog, ok := logDriver.(logger.LogReader)
if !ok {
return logger.ErrReadLogsNotSupported
}
logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1})
LogLoop:
for {
select {
case msg, ok := <-logs.Msg:
if !ok {
break LogLoop
}
if msg.Source == "stdout" && stdout != nil {
stdout.Write(msg.Line)
}
if msg.Source == "stderr" && stderr != nil {
stderr.Write(msg.Line)
}
case err := <-logs.Err:
logrus.Errorf("Error streaming logs: %v", err)
break LogLoop
}
}
}
container.logEvent("attach")
//stream
if stream {
var stdinPipe io.ReadCloser
if stdin != nil {
r, w := io.Pipe()
go func() {
defer w.Close()
defer logrus.Debugf("Closing buffered stdin pipe")
io.Copy(w, stdin)
}()
stdinPipe = r
}
<-container.Attach(stdinPipe, stdout, stderr)
// If we are in stdinonce mode, wait for the process to end
// otherwise, simply return
if container.Config.StdinOnce && !container.Config.Tty {
container.WaitStop(-1 * time.Second)
}
}
return nil
}
func attach(streamConfig *streamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
var (
cStdout, cStderr io.ReadCloser
@ -1074,8 +498,8 @@ func (container *Container) shouldRestart() bool {
(container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0)
}
func (container *Container) mountVolumes() error {
mounts, err := container.setupMounts()
func (daemon *Daemon) mountVolumes(container *Container) error {
mounts, err := daemon.setupMounts(container)
if err != nil {
return err
}
@ -1108,44 +532,6 @@ func (container *Container) mountVolumes() error {
return nil
}
func (container *Container) prepareMountPoints() error {
for _, config := range container.MountPoints {
if len(config.Driver) > 0 {
v, err := container.daemon.createVolume(config.Name, config.Driver, nil)
if err != nil {
return err
}
config.Volume = v
}
}
return nil
}
func (container *Container) removeMountPoints(rm bool) error {
var rmErrors []string
for _, m := range container.MountPoints {
if m.Volume == nil {
continue
}
container.daemon.volumes.Decrement(m.Volume)
if rm {
err := container.daemon.volumes.Remove(m.Volume)
// ErrVolumeInUse is ignored because having this
// volume referenced by another container is
// not an error, but an implementation detail.
// This prevents docker from logging "ERROR: Volume in use"
// when another container is using the volume.
if err != nil && err != store.ErrVolumeInUse {
rmErrors = append(rmErrors, err.Error())
}
}
}
if len(rmErrors) > 0 {
return derr.ErrorCodeRemovingVolume.WithArgs(strings.Join(rmErrors, "\n"))
}
return nil
}
func (container *Container) unmountVolumes(forceSyscall bool) error {
var (
volumeMounts []volume.MountPoint

View File

@ -64,7 +64,7 @@ type Container struct {
func killProcessDirectly(container *Container) error {
if _, err := container.WaitStop(10 * time.Second); err != nil {
// Ensure that we don't kill ourselves
if pid := container.getPID(); pid != 0 {
if pid := container.GetPID(); pid != 0 {
logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
if err := syscall.Kill(pid, 9); err != nil {
if err != syscall.ESRCH {
@ -77,11 +77,8 @@ func killProcessDirectly(container *Container) error {
return nil
}
func (container *Container) setupLinkedContainers() ([]string, error) {
var (
env []string
daemon = container.daemon
)
func (daemon *Daemon) setupLinkedContainers(container *Container) ([]string, error) {
var env []string
children, err := daemon.children(container.Name)
if err != nil {
return nil, err
@ -185,17 +182,16 @@ func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.
return devs, derr.ErrorCodeDeviceInfo.WithArgs(deviceMapping.PathOnHost, err)
}
func populateCommand(c *Container, env []string) error {
func (daemon *Daemon) populateCommand(c *Container, env []string) error {
var en *execdriver.Network
if !c.Config.NetworkDisabled {
en = &execdriver.Network{}
if !c.daemon.execDriver.SupportsHooks() || c.hostConfig.NetworkMode.IsHost() {
if !daemon.execDriver.SupportsHooks() || c.hostConfig.NetworkMode.IsHost() {
en.NamespacePath = c.NetworkSettings.SandboxKey
}
parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
if parts[0] == "container" {
nc, err := c.getNetworkedContainer()
if c.hostConfig.NetworkMode.IsContainer() {
nc, err := daemon.getNetworkedContainer(c.ID, c.hostConfig.NetworkMode.ConnectedContainer())
if err != nil {
return err
}
@ -216,7 +212,7 @@ func populateCommand(c *Container, env []string) error {
}
if c.hostConfig.IpcMode.IsContainer() {
ic, err := c.getIpcContainer()
ic, err := daemon.getIpcContainer(c)
if err != nil {
return err
}
@ -273,7 +269,7 @@ func populateCommand(c *Container, env []string) error {
for _, ul := range ulimits {
ulIdx[ul.Name] = ul
}
for name, ul := range c.daemon.configStore.Ulimits {
for name, ul := range daemon.configStore.Ulimits {
if _, exists := ulIdx[name]; !exists {
ulimits = append(ulimits, ul)
}
@ -321,12 +317,12 @@ func populateCommand(c *Container, env []string) error {
processConfig.Env = env
remappedRoot := &execdriver.User{}
rootUID, rootGID := c.daemon.GetRemappedUIDGID()
rootUID, rootGID := daemon.GetRemappedUIDGID()
if rootUID != 0 {
remappedRoot.UID = rootUID
remappedRoot.GID = rootGID
}
uidMap, gidMap := c.daemon.GetUIDGIDMaps()
uidMap, gidMap := daemon.GetUIDGIDMaps()
c.command = &execdriver.Command{
CommonCommand: execdriver.CommonCommand{
@ -379,24 +375,23 @@ func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Devi
return append(devs, userDevices...)
}
// GetSize returns the real size & virtual size of the container.
func (container *Container) getSize() (int64, int64) {
// getSize returns the real size & virtual size of the container.
func (daemon *Daemon) getSize(container *Container) (int64, int64) {
var (
sizeRw, sizeRootfs int64
err error
driver = container.daemon.driver
)
if err := container.Mount(); err != nil {
if err := daemon.Mount(container); err != nil {
logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
return sizeRw, sizeRootfs
}
defer container.Unmount()
defer daemon.Unmount(container)
initID := fmt.Sprintf("%s-init", container.ID)
sizeRw, err = driver.DiffSize(container.ID, initID)
sizeRw, err = daemon.driver.DiffSize(container.ID, initID)
if err != nil {
logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", driver, container.ID, err)
logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", daemon.driver, container.ID, err)
// FIXME: GetSize should return an error. Not changing it now in case
// there is a side-effect.
sizeRw = -1
@ -443,7 +438,7 @@ func (container *Container) buildHostnameFile() error {
return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
}
func (container *Container) buildSandboxOptions(n libnetwork.Network) ([]libnetwork.SandboxOption, error) {
func (daemon *Daemon) buildSandboxOptions(container *Container, n libnetwork.Network) ([]libnetwork.SandboxOption, error) {
var (
sboxOptions []libnetwork.SandboxOption
err error
@ -459,7 +454,7 @@ func (container *Container) buildSandboxOptions(n libnetwork.Network) ([]libnetw
sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox())
sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts"))
sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf"))
} else if container.daemon.execDriver.SupportsHooks() {
} else if daemon.execDriver.SupportsHooks() {
// OptionUseExternalKey is mandatory for userns support.
// But optional for non-userns support
sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey())
@ -479,8 +474,8 @@ func (container *Container) buildSandboxOptions(n libnetwork.Network) ([]libnetw
if len(container.hostConfig.DNS) > 0 {
dns = container.hostConfig.DNS
} else if len(container.daemon.configStore.DNS) > 0 {
dns = container.daemon.configStore.DNS
} else if len(daemon.configStore.DNS) > 0 {
dns = daemon.configStore.DNS
}
for _, d := range dns {
@ -489,8 +484,8 @@ func (container *Container) buildSandboxOptions(n libnetwork.Network) ([]libnetw
if len(container.hostConfig.DNSSearch) > 0 {
dnsSearch = container.hostConfig.DNSSearch
} else if len(container.daemon.configStore.DNSSearch) > 0 {
dnsSearch = container.daemon.configStore.DNSSearch
} else if len(daemon.configStore.DNSSearch) > 0 {
dnsSearch = daemon.configStore.DNSSearch
}
for _, ds := range dnsSearch {
@ -499,8 +494,8 @@ func (container *Container) buildSandboxOptions(n libnetwork.Network) ([]libnetw
if len(container.hostConfig.DNSOptions) > 0 {
dnsOptions = container.hostConfig.DNSOptions
} else if len(container.daemon.configStore.DNSOptions) > 0 {
dnsOptions = container.daemon.configStore.DNSOptions
} else if len(daemon.configStore.DNSOptions) > 0 {
dnsOptions = daemon.configStore.DNSOptions
}
for _, ds := range dnsOptions {
@ -537,7 +532,7 @@ func (container *Container) buildSandboxOptions(n libnetwork.Network) ([]libnetw
var childEndpoints, parentEndpoints []string
children, err := container.daemon.children(container.Name)
children, err := daemon.children(container.Name)
if err != nil {
return nil, err
}
@ -561,18 +556,18 @@ func (container *Container) buildSandboxOptions(n libnetwork.Network) ([]libnetw
}
bridgeSettings := container.NetworkSettings.Networks["bridge"]
refs := container.daemon.containerGraph().RefPaths(container.ID)
refs := daemon.containerGraph().RefPaths(container.ID)
for _, ref := range refs {
if ref.ParentID == "0" {
continue
}
c, err := container.daemon.Get(ref.ParentID)
c, err := daemon.Get(ref.ParentID)
if err != nil {
logrus.Error(err)
}
if c != nil && !container.daemon.configStore.DisableBridge && container.hostConfig.NetworkMode.IsPrivate() {
if c != nil && !daemon.configStore.DisableBridge && container.hostConfig.NetworkMode.IsPrivate() {
logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, bridgeSettings.IPAddress)
sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate(c.ID, ref.Name, bridgeSettings.IPAddress))
if ep.ID() != "" {
@ -720,13 +715,13 @@ func (container *Container) updateJoinInfo(n libnetwork.Network, ep libnetwork.E
return nil
}
func (container *Container) updateNetworkSettings(n libnetwork.Network) error {
func (daemon *Daemon) updateNetworkSettings(container *Container, n libnetwork.Network) error {
if container.NetworkSettings == nil {
container.NetworkSettings = &network.Settings{Networks: make(map[string]*network.EndpointSettings)}
}
for s := range container.NetworkSettings.Networks {
sn, err := container.daemon.FindNetwork(s)
sn, err := daemon.FindNetwork(s)
if err != nil {
continue
}
@ -749,14 +744,14 @@ func (container *Container) updateNetworkSettings(n libnetwork.Network) error {
return nil
}
func (container *Container) updateEndpointNetworkSettings(n libnetwork.Network, ep libnetwork.Endpoint) error {
func (daemon *Daemon) updateEndpointNetworkSettings(container *Container, n libnetwork.Network, ep libnetwork.Endpoint) error {
networkSettings, err := container.buildEndpointInfo(n, ep, container.NetworkSettings)
if err != nil {
return err
}
if container.hostConfig.NetworkMode == runconfig.NetworkMode("bridge") {
networkSettings.Bridge = container.daemon.configStore.Bridge.Iface
networkSettings.Bridge = daemon.configStore.Bridge.Iface
}
return nil
@ -770,8 +765,8 @@ func (container *Container) updateSandboxNetworkSettings(sb libnetwork.Sandbox)
// UpdateNetwork is used to update the container's network (e.g. when linked containers
// get removed/unlinked).
func (container *Container) updateNetwork() error {
ctrl := container.daemon.netController
func (daemon *Daemon) updateNetwork(container *Container) error {
ctrl := daemon.netController
sid := container.NetworkSettings.SandboxID
sb, err := ctrl.SandboxByID(sid)
@ -782,7 +777,7 @@ func (container *Container) updateNetwork() error {
// Find if container is connected to the default bridge network
var n libnetwork.Network
for name := range container.NetworkSettings.Networks {
sn, err := container.daemon.FindNetwork(name)
sn, err := daemon.FindNetwork(name)
if err != nil {
continue
}
@ -797,7 +792,7 @@ func (container *Container) updateNetwork() error {
return nil
}
options, err := container.buildSandboxOptions(n)
options, err := daemon.buildSandboxOptions(container, n)
if err != nil {
return derr.ErrorCodeNetworkUpdate.WithArgs(err)
}
@ -894,8 +889,8 @@ func (container *Container) buildCreateEndpointOptions(n libnetwork.Network) ([]
return createOptions, nil
}
func (container *Container) allocateNetwork() error {
controller := container.daemon.netController
func (daemon *Daemon) allocateNetwork(container *Container) error {
controller := daemon.netController
// Cleanup any stale sandbox left over due to ungraceful daemon shutdown
if err := controller.SandboxDestroy(container.ID); err != nil {
@ -914,7 +909,7 @@ func (container *Container) allocateNetwork() error {
networkName = controller.Config().Daemon.DefaultNetwork
}
if mode.IsUserDefined() {
n, err := container.daemon.FindNetwork(networkName)
n, err := daemon.FindNetwork(networkName)
if err != nil {
return err
}
@ -926,7 +921,7 @@ func (container *Container) allocateNetwork() error {
}
for n := range container.NetworkSettings.Networks {
if err := container.connectToNetwork(n, updateSettings); err != nil {
if err := daemon.connectToNetwork(container, n, updateSettings); err != nil {
return err
}
}
@ -934,9 +929,9 @@ func (container *Container) allocateNetwork() error {
return container.writeHostConfig()
}
func (container *Container) getNetworkSandbox() libnetwork.Sandbox {
func (daemon *Daemon) getNetworkSandbox(container *Container) libnetwork.Sandbox {
var sb libnetwork.Sandbox
container.daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool {
daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool {
if s.ContainerID() == container.ID {
sb = s
return true
@ -947,11 +942,11 @@ func (container *Container) getNetworkSandbox() libnetwork.Sandbox {
}
// ConnectToNetwork connects a container to a network
func (container *Container) ConnectToNetwork(idOrName string) error {
func (daemon *Daemon) ConnectToNetwork(container *Container, idOrName string) error {
if !container.Running {
return derr.ErrorCodeNotRunning.WithArgs(container.ID)
}
if err := container.connectToNetwork(idOrName, true); err != nil {
if err := daemon.connectToNetwork(container, idOrName, true); err != nil {
return err
}
if err := container.toDiskLocking(); err != nil {
@ -960,26 +955,26 @@ func (container *Container) ConnectToNetwork(idOrName string) error {
return nil
}
func (container *Container) connectToNetwork(idOrName string, updateSettings bool) (err error) {
func (daemon *Daemon) connectToNetwork(container *Container, idOrName string, updateSettings bool) (err error) {
if container.hostConfig.NetworkMode.IsContainer() {
return runconfig.ErrConflictSharedNetwork
}
if runconfig.NetworkMode(idOrName).IsBridge() &&
container.daemon.configStore.DisableBridge {
daemon.configStore.DisableBridge {
container.Config.NetworkDisabled = true
return nil
}
controller := container.daemon.netController
controller := daemon.netController
n, err := container.daemon.FindNetwork(idOrName)
n, err := daemon.FindNetwork(idOrName)
if err != nil {
return err
}
if updateSettings {
if err := container.updateNetworkSettings(n); err != nil {
if err := daemon.updateNetworkSettings(container, n); err != nil {
return err
}
}
@ -1011,13 +1006,13 @@ func (container *Container) connectToNetwork(idOrName string, updateSettings boo
}
}()
if err := container.updateEndpointNetworkSettings(n, ep); err != nil {
if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil {
return err
}
sb := container.getNetworkSandbox()
sb := daemon.getNetworkSandbox(container)
if sb == nil {
options, err := container.buildSandboxOptions(n)
options, err := daemon.buildSandboxOptions(container, n)
if err != nil {
return err
}
@ -1040,12 +1035,12 @@ func (container *Container) connectToNetwork(idOrName string, updateSettings boo
return nil
}
func (container *Container) initializeNetworking() error {
func (daemon *Daemon) initializeNetworking(container *Container) error {
var err error
if container.hostConfig.NetworkMode.IsContainer() {
// we need to get the hosts files from the container to join
nc, err := container.getNetworkedContainer()
nc, err := daemon.getNetworkedContainer(container.ID, container.hostConfig.NetworkMode.ConnectedContainer())
if err != nil {
return err
}
@ -1071,7 +1066,7 @@ func (container *Container) initializeNetworking() error {
}
if err := container.allocateNetwork(); err != nil {
if err := daemon.allocateNetwork(container); err != nil {
return err
}
@ -1080,21 +1075,21 @@ func (container *Container) initializeNetworking() error {
// called from the libcontainer pre-start hook to set the network
// namespace configuration linkage to the libnetwork "sandbox" entity
func (container *Container) setNetworkNamespaceKey(pid int) error {
func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error {
path := fmt.Sprintf("/proc/%d/ns/net", pid)
var sandbox libnetwork.Sandbox
search := libnetwork.SandboxContainerWalker(&sandbox, container.ID)
container.daemon.netController.WalkSandboxes(search)
search := libnetwork.SandboxContainerWalker(&sandbox, containerID)
daemon.netController.WalkSandboxes(search)
if sandbox == nil {
return derr.ErrorCodeNoSandbox.WithArgs(container.ID)
return derr.ErrorCodeNoSandbox.WithArgs(containerID)
}
return sandbox.SetKey(path)
}
func (container *Container) getIpcContainer() (*Container, error) {
func (daemon *Daemon) getIpcContainer(container *Container) (*Container, error) {
containerID := container.hostConfig.IpcMode.Container()
c, err := container.daemon.Get(containerID)
c, err := daemon.Get(containerID)
if err != nil {
return nil, err
}
@ -1131,30 +1126,21 @@ func (container *Container) setupWorkingDirectory() error {
return nil
}
func (container *Container) getNetworkedContainer() (*Container, error) {
parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2)
switch parts[0] {
case "container":
if len(parts) != 2 {
return nil, derr.ErrorCodeParseContainer
}
nc, err := container.daemon.Get(parts[1])
if err != nil {
return nil, err
}
if container == nc {
return nil, derr.ErrorCodeJoinSelf
}
if !nc.IsRunning() {
return nil, derr.ErrorCodeJoinRunning.WithArgs(parts[1])
}
return nc, nil
default:
return nil, derr.ErrorCodeModeNotContainer
func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*Container, error) {
nc, err := daemon.Get(connectedContainerID)
if err != nil {
return nil, err
}
if containerID == nc.ID {
return nil, derr.ErrorCodeJoinSelf
}
if !nc.IsRunning() {
return nil, derr.ErrorCodeJoinRunning.WithArgs(connectedContainerID)
}
return nc, nil
}
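
The hand-rolled strings.SplitN parsing disappears because runconfig.NetworkMode already knows how to pick apart "container:<id>". Roughly what those helpers do (an assumption about runconfig's internals, shown only for orientation):

package runconfigsketch

import "strings"

type NetworkMode string

// IsContainer reports whether the mode has the "container:<id>" form.
func (n NetworkMode) IsContainer() bool {
	parts := strings.SplitN(string(n), ":", 2)
	return len(parts) > 1 && parts[0] == "container"
}

// ConnectedContainer returns the <id> half of "container:<id>".
func (n NetworkMode) ConnectedContainer() string {
	parts := strings.SplitN(string(n), ":", 2)
	if len(parts) > 1 {
		return parts[1]
	}
	return ""
}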
func (container *Container) releaseNetwork() {
func (daemon *Daemon) releaseNetwork(container *Container) {
if container.hostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled {
return
}
@ -1171,7 +1157,7 @@ func (container *Container) releaseNetwork() {
return
}
sb, err := container.daemon.netController.SandboxByID(sid)
sb, err := daemon.netController.SandboxByID(sid)
if err != nil {
logrus.Errorf("error locating sandbox id %s: %v", sid, err)
return
@ -1338,8 +1324,8 @@ func (container *Container) hasMountFor(path string) bool {
return exists
}
func (container *Container) setupIpcDirs() error {
rootUID, rootGID := container.daemon.GetRemappedUIDGID()
func (daemon *Daemon) setupIpcDirs(container *Container) error {
rootUID, rootGID := daemon.GetRemappedUIDGID()
if !container.hasMountFor("/dev/shm") {
shmPath, err := container.shmPath()
if err != nil {
@ -1444,20 +1430,3 @@ func (container *Container) ipcMounts() []execdriver.Mount {
func detachMounted(path string) error {
return syscall.Unmount(path, syscall.MNT_DETACH)
}
// conditionalMountOnStart is a platform specific helper function during the
// container start to call mount.
func (container *Container) conditionalMountOnStart() error {
if err := container.Mount(); err != nil {
return err
}
return nil
}
// conditionalUnmountOnCleanup is a platform specific helper function called
// during the cleanup of a container to unmount.
func (container *Container) conditionalUnmountOnCleanup() {
if err := container.Unmount(); err != nil {
logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
}
}

View File

@ -5,7 +5,6 @@ package daemon
import (
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/execdriver"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/volume"
@ -28,7 +27,7 @@ func killProcessDirectly(container *Container) error {
return nil
}
func (container *Container) setupLinkedContainers() ([]string, error) {
func (daemon *Daemon) setupLinkedContainers(container *Container) ([]string, error) {
return nil, nil
}
@ -37,12 +36,12 @@ func (container *Container) createDaemonEnvironment(linkedEnv []string) []string
return container.Config.Env
}
func (container *Container) initializeNetworking() error {
func (daemon *Daemon) initializeNetworking(container *Container) error {
return nil
}
// ConnectToNetwork connects a container to the network
func (container *Container) ConnectToNetwork(idOrName string) error {
func (daemon *Daemon) ConnectToNetwork(container *Container, idOrName string) error {
return nil
}
@ -55,7 +54,7 @@ func (container *Container) setupWorkingDirectory() error {
return nil
}
func populateCommand(c *Container, env []string) error {
func (daemon *Daemon) populateCommand(c *Container, env []string) error {
en := &execdriver.Network{
Interface: nil,
}
@ -67,7 +66,7 @@ func populateCommand(c *Container, env []string) error {
if !c.Config.NetworkDisabled {
en.Interface = &execdriver.NetworkInterface{
MacAddress: c.Config.MacAddress,
Bridge: c.daemon.configStore.Bridge.VirtualSwitchName,
Bridge: daemon.configStore.Bridge.VirtualSwitchName,
PortBindings: c.hostConfig.PortBindings,
// TODO Windows. Include IPAddress. There already is a
@ -100,22 +99,22 @@ func populateCommand(c *Container, env []string) error {
processConfig.Env = env
var layerPaths []string
img, err := c.daemon.graph.Get(c.ImageID)
img, err := daemon.graph.Get(c.ImageID)
if err != nil {
return derr.ErrorCodeGetGraph.WithArgs(c.ImageID, err)
}
for i := img; i != nil && err == nil; i, err = c.daemon.graph.GetParent(i) {
lp, err := c.daemon.driver.Get(i.ID, "")
for i := img; i != nil && err == nil; i, err = daemon.graph.GetParent(i) {
lp, err := daemon.driver.Get(i.ID, "")
if err != nil {
return derr.ErrorCodeGetLayer.WithArgs(c.daemon.driver.String(), i.ID, err)
return derr.ErrorCodeGetLayer.WithArgs(daemon.driver.String(), i.ID, err)
}
layerPaths = append(layerPaths, lp)
err = c.daemon.driver.Put(i.ID)
err = daemon.driver.Put(i.ID)
if err != nil {
return derr.ErrorCodePutLayer.WithArgs(c.daemon.driver.String(), i.ID, err)
return derr.ErrorCodePutLayer.WithArgs(daemon.driver.String(), i.ID, err)
}
}
m, err := c.daemon.driver.GetMetadata(c.ID)
m, err := daemon.driver.GetMetadata(c.ID)
if err != nil {
return derr.ErrorCodeGetLayerMetadata.WithArgs(err)
}
@ -143,27 +142,27 @@ func populateCommand(c *Container, env []string) error {
return nil
}
// GetSize returns real size & virtual size
func (container *Container) getSize() (int64, int64) {
// getSize returns real size & virtual size
func (daemon *Daemon) getSize(container *Container) (int64, int64) {
// TODO Windows
return 0, 0
}
// setNetworkNamespaceKey is a no-op on Windows.
func (container *Container) setNetworkNamespaceKey(pid int) error {
func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error {
return nil
}
// allocateNetwork is a no-op on Windows.
func (container *Container) allocateNetwork() error {
func (daemon *Daemon) allocateNetwork(container *Container) error {
return nil
}
func (container *Container) updateNetwork() error {
func (daemon *Daemon) updateNetwork(container *Container) error {
return nil
}
func (container *Container) releaseNetwork() {
func (daemon *Daemon) releaseNetwork(container *Container) {
}
// appendNetworkMounts appends any network mounts to the array of mount points passed in.
@ -173,7 +172,7 @@ func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint)
return volumeMounts, nil
}
func (container *Container) setupIpcDirs() error {
func (daemon *Daemon) setupIpcDirs(container *Container) error {
return nil
}
@ -191,26 +190,3 @@ func (container *Container) ipcMounts() []execdriver.Mount {
func getDefaultRouteMtu() (int, error) {
return -1, errSystemNotSupported
}
// conditionalMountOnStart is a platform specific helper function during the
// container start to call mount.
func (container *Container) conditionalMountOnStart() error {
// We do not mount if a Hyper-V container
if !container.hostConfig.Isolation.IsHyperV() {
if err := container.Mount(); err != nil {
return err
}
}
return nil
}
// conditionalUnmountOnCleanup is a platform specific helper function called
// during the cleanup of a container to unmount.
func (container *Container) conditionalUnmountOnCleanup() {
// We do not unmount if a Hyper-V container
if !container.hostConfig.Isolation.IsHyperV() {
if err := container.Unmount(); err != nil {
logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
}
}
}

View File

@ -109,17 +109,17 @@ func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *Container, re
}
defer func() {
if retErr != nil {
if err := container.removeMountPoints(true); err != nil {
if err := daemon.removeMountPoints(container, true); err != nil {
logrus.Error(err)
}
}
}()
if err := container.Mount(); err != nil {
if err := daemon.Mount(container); err != nil {
return nil, err
}
defer container.Unmount()
defer daemon.Unmount(container)
if err := createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig, img); err != nil {
if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig, img); err != nil {
return nil, err
}
@ -127,7 +127,7 @@ func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *Container, re
logrus.Errorf("Error saving new container to disk: %v", err)
return nil, err
}
container.logEvent("create")
daemon.LogContainerEvent(container, "create")
return container, nil
}

View File

@ -15,7 +15,7 @@ import (
)
// createContainerPlatformSpecificSettings performs platform specific container create functionality
func createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
for spec := range config.Volumes {
name := stringid.GenerateNonCryptoID()
destination := filepath.Clean(spec)
@ -45,7 +45,7 @@ func createContainerPlatformSpecificSettings(container *Container, config *runco
}
}
v, err := container.daemon.createVolume(name, volumeDriver, nil)
v, err := daemon.createVolume(name, volumeDriver, nil)
if err != nil {
return err
}

View File

@ -10,7 +10,7 @@ import (
)
// createContainerPlatformSpecificSettings performs platform specific container create functionality
func createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
for spec := range config.Volumes {
mp, err := volume.ParseMountSpec(spec, hostConfig.VolumeDriver)
@ -41,7 +41,7 @@ func createContainerPlatformSpecificSettings(container *Container, config *runco
// Create the volume in the volume driver. If it doesn't exist,
// a new one will be created.
v, err := container.daemon.createVolume(mp.Name, volumeDriver, nil)
v, err := daemon.createVolume(mp.Name, volumeDriver, nil)
if err != nil {
return err
}

View File

@ -56,6 +56,8 @@ import (
"github.com/docker/docker/volume/local"
"github.com/docker/docker/volume/store"
"github.com/docker/libnetwork"
lntypes "github.com/docker/libnetwork/types"
"github.com/opencontainers/runc/libcontainer"
)
var (
@ -192,7 +194,7 @@ func (daemon *Daemon) load(id string) (*Container, error) {
// Register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) Register(container *Container) error {
if container.daemon != nil || daemon.Exists(container.ID) {
if daemon.Exists(container.ID) {
return fmt.Errorf("Container is already loaded")
}
if err := validateID(container.ID); err != nil {
@ -202,8 +204,6 @@ func (daemon *Daemon) Register(container *Container) error {
return err
}
container.daemon = daemon
// Attach to stdout and stderr
container.stderr = new(broadcaster.Unbuffered)
container.stdout = new(broadcaster.Unbuffered)
@ -234,7 +234,7 @@ func (daemon *Daemon) Register(container *Container) error {
container.unmountIpcMounts(mount.Unmount)
if err := container.Unmount(); err != nil {
if err := daemon.Unmount(container); err != nil {
logrus.Debugf("unmount error %s", err)
}
if err := container.toDiskLocking(); err != nil {
@ -246,7 +246,7 @@ func (daemon *Daemon) Register(container *Container) error {
return err
}
if err := container.prepareMountPoints(); err != nil {
if err := daemon.prepareMountPoints(container); err != nil {
return err
}
@ -349,7 +349,7 @@ func (daemon *Daemon) restore() error {
if daemon.configStore.AutoRestart && container.shouldRestart() {
logrus.Debugf("Starting container %s", container.ID)
if err := container.Start(); err != nil {
if err := daemon.containerStart(container); err != nil {
logrus.Errorf("Failed to start container %s: %s", container.ID, err)
}
}
@ -816,7 +816,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
d.configStore = config
d.sysInitPath = sysInitPath
d.execDriver = ed
d.statsCollector = newStatsCollector(1 * time.Second)
d.statsCollector = d.newStatsCollector(1 * time.Second)
d.defaultLogConfig = config.LogConfig
d.RegistryService = registryService
d.EventsService = eventsService
@ -838,7 +838,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
return d, nil
}
func stopContainer(c *Container) error {
func (daemon *Daemon) shutdownContainer(c *Container) error {
// TODO(windows): Handle docker restart with paused containers
if c.isPaused() {
// To terminate a process in freezer cgroup, we should send
@ -849,10 +849,10 @@ func stopContainer(c *Container) error {
if !ok {
return fmt.Errorf("System doesn not support SIGTERM")
}
if err := c.daemon.kill(c, int(sig)); err != nil {
if err := daemon.kill(c, int(sig)); err != nil {
return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
}
if err := c.unpause(); err != nil {
if err := daemon.containerUnpause(c); err != nil {
return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
}
if _, err := c.WaitStop(10 * time.Second); err != nil {
@ -861,7 +861,7 @@ func stopContainer(c *Container) error {
if !ok {
return fmt.Errorf("System does not support SIGKILL")
}
if err := c.daemon.kill(c, int(sig)); err != nil {
if err := daemon.kill(c, int(sig)); err != nil {
logrus.Errorf("Failed to SIGKILL container %s", c.ID)
}
c.WaitStop(-1 * time.Second)
@ -869,7 +869,7 @@ func stopContainer(c *Container) error {
}
}
// If the container failed to exit within 10 seconds of SIGTERM, then use the force
if err := c.Stop(10); err != nil {
if err := daemon.containerStop(c, 10); err != nil {
return fmt.Errorf("Stop container %s with error: %v", c.ID, err)
}
@ -891,7 +891,7 @@ func (daemon *Daemon) Shutdown() error {
group.Add(1)
go func(c *Container) {
defer group.Done()
if err := stopContainer(c); err != nil {
if err := daemon.shutdownContainer(c); err != nil {
logrus.Errorf("Stop container error: %v", err)
return
}
@ -947,16 +947,18 @@ func (daemon *Daemon) Mount(container *Container) error {
return nil
}
func (daemon *Daemon) unmount(container *Container) error {
// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *Container) error {
return daemon.driver.Put(container.ID)
}
func (daemon *Daemon) run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
// Run uses the execution driver to run a given container
func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
hooks := execdriver.Hooks{
Start: startCallback,
}
hooks.PreStart = append(hooks.PreStart, func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
return c.setNetworkNamespaceKey(pid)
return daemon.setNetworkNamespaceKey(c.ID, pid)
})
return daemon.execDriver.Run(c.command, pipes, hooks)
}
@ -1299,3 +1301,59 @@ func (daemon *Daemon) SearchRegistryForImages(term string,
headers map[string][]string) (*registry.SearchResults, error) {
return daemon.RegistryService.Search(term, authConfig, headers)
}
// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
return daemon.shutdown
}
// GetContainerStats collects all the stats published by a container
func (daemon *Daemon) GetContainerStats(container *Container) (*execdriver.ResourceStats, error) {
stats, err := daemon.stats(container)
if err != nil {
return nil, err
}
// Retrieve the nw statistics from libnetwork and inject them in the Stats
var nwStats []*libcontainer.NetworkInterface
if nwStats, err = daemon.getNetworkStats(container); err != nil {
return nil, err
}
stats.Interfaces = nwStats
return stats, nil
}
func (daemon *Daemon) getNetworkStats(c *Container) ([]*libcontainer.NetworkInterface, error) {
var list []*libcontainer.NetworkInterface
sb, err := daemon.netController.SandboxByID(c.NetworkSettings.SandboxID)
if err != nil {
return list, err
}
stats, err := sb.Statistics()
if err != nil {
return list, err
}
// Convert libnetwork nw stats into libcontainer nw stats
for ifName, ifStats := range stats {
list = append(list, convertLnNetworkStats(ifName, ifStats))
}
return list, nil
}
func convertLnNetworkStats(name string, stats *lntypes.InterfaceStatistics) *libcontainer.NetworkInterface {
n := &libcontainer.NetworkInterface{Name: name}
n.RxBytes = stats.RxBytes
n.RxPackets = stats.RxPackets
n.RxErrors = stats.RxErrors
n.RxDropped = stats.RxDropped
n.TxBytes = stats.TxBytes
n.TxPackets = stats.TxPackets
n.TxErrors = stats.TxErrors
n.TxDropped = stats.TxDropped
return n
}

View File

@ -610,6 +610,20 @@ func (daemon *Daemon) newBaseContainer(id string) *Container {
}
}
// conditionalMountOnStart is a platform specific helper function called during
// container start to call mount.
func (daemon *Daemon) conditionalMountOnStart(container *Container) error {
return daemon.Mount(container)
}
// conditionalUnmountOnCleanup is a platform specific helper function called
// during the cleanup of a container to unmount.
func (daemon *Daemon) conditionalUnmountOnCleanup(container *Container) {
if err := daemon.Unmount(container); err != nil {
logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
}
}
// getDefaultRouteMtu returns the MTU for the default route's interface.
func getDefaultRouteMtu() (int, error) {
routes, err := netlink.RouteList(nil, 0)

View File

@ -154,3 +154,26 @@ func (daemon *Daemon) newBaseContainer(id string) *Container {
func (daemon *Daemon) cleanupMounts() error {
return nil
}
// conditionalMountOnStart is a platform specific helper function called during
// container start to call mount.
func (daemon *Daemon) conditionalMountOnStart(container *Container) error {
// We do not mount if a Hyper-V container
if !container.hostConfig.Isolation.IsHyperV() {
if err := daemon.Mount(container); err != nil {
return err
}
}
return nil
}
// conditionalUnmountOnCleanup is a platform specific helper function called
// during the cleanup of a container to unmount.
func (daemon *Daemon) conditionalUnmountOnCleanup(container *Container) {
// We do not unmount if a Hyper-V container
if !container.hostConfig.Isolation.IsHyperV() {
if err := daemon.Unmount(container); err != nil {
logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
}
}
}

View File

@ -96,7 +96,8 @@ func (d Docker) Create(cfg *runconfig.Config, hostCfg *runconfig.HostConfig) (*d
if err != nil {
return nil, ccr.Warnings, err
}
return container, ccr.Warnings, container.Mount()
return container, ccr.Warnings, d.Mount(container)
}
// Remove removes a container specified by `id`.
@ -205,6 +206,26 @@ func (d Docker) GetCachedImage(imgID string, cfg *runconfig.Config) (string, err
return cache.ID, nil
}
// Kill stops the container execution abruptly.
func (d Docker) Kill(container *daemon.Container) error {
return d.Daemon.Kill(container)
}
// Mount mounts the root filesystem for the container.
func (d Docker) Mount(c *daemon.Container) error {
return d.Daemon.Mount(c)
}
// Unmount unmounts the root filesystem for the container.
func (d Docker) Unmount(c *daemon.Container) error {
return d.Daemon.Unmount(c)
}
// Start starts a container
func (d Docker) Start(c *daemon.Container) error {
return d.Daemon.Start(c)
}
// Following is specific to builder contexts
// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used

View File

@ -45,7 +45,7 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
parentContainer, _ := daemon.Get(pe.ID())
if parentContainer != nil {
if err := parentContainer.updateNetwork(); err != nil {
if err := daemon.updateNetwork(parentContainer); err != nil {
logrus.Debugf("Could not update network to remove link %s: %v", n, err)
}
}
@ -58,7 +58,7 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
return err
}
if err := container.removeMountPoints(config.RemoveVolume); err != nil {
if err := daemon.removeMountPoints(container, config.RemoveVolume); err != nil {
logrus.Error(err)
}
@ -71,7 +71,7 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
if !forceRemove {
return derr.ErrorCodeRmRunning
}
if err := container.Kill(); err != nil {
if err := daemon.Kill(container); err != nil {
return derr.ErrorCodeRmFailed.WithArgs(err)
}
}
@ -90,7 +90,7 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
// if stats are currently getting collected.
daemon.statsCollector.stopCollection(container)
if err = container.Stop(3); err != nil {
if err = daemon.containerStop(container, 3); err != nil {
return err
}
@ -111,7 +111,7 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
daemon.idIndex.Delete(container.ID)
daemon.containers.Delete(container.ID)
os.RemoveAll(container.root)
container.logEvent("destroy")
daemon.LogContainerEvent(container, "destroy")
}
}()
@ -140,7 +140,7 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
daemon.idIndex.Delete(container.ID)
daemon.containers.Delete(container.ID)
container.logEvent("destroy")
daemon.LogContainerEvent(container, "destroy")
return nil
}

10
daemon/events.go Normal file
View File

@ -0,0 +1,10 @@
package daemon
// LogContainerEvent generates an event related to a container.
func (daemon *Daemon) LogContainerEvent(container *Container, action string) {
daemon.EventsService.Log(
action,
container.ID,
container.Config.Image,
)
}
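
This helper moves event emission from the container (the old container.logEvent) onto the daemon, which owns the events service. A minimal standalone sketch of the same shape, using hypothetical stand-in types rather than the real ones:

package main

import "fmt"

// Hypothetical stand-ins for the real Container and events service.
type Container struct {
	ID    string
	Image string
}

type eventsService struct{}

func (e *eventsService) Log(action, id, from string) {
	fmt.Printf("event: %s %s (%s)\n", action, id, from)
}

type Daemon struct {
	EventsService *eventsService
}

// The daemon, not the container, holds the reference to the service.
func (d *Daemon) LogContainerEvent(c *Container, action string) {
	d.EventsService.Log(action, c.ID, c.Image)
}

func main() {
	d := &Daemon{EventsService: &eventsService{}}
	d.LogContainerEvent(&Container{ID: "abc123", Image: "busybox"}, "create")
}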

View File

@ -13,6 +13,7 @@ import (
"github.com/docker/docker/pkg/broadcaster"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/stringutils"
"github.com/docker/docker/runconfig"
@ -187,7 +188,7 @@ func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, erro
d.registerExecCommand(ExecConfig)
container.logEvent("exec_create: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
d.LogContainerEvent(container, "exec_create: "+ExecConfig.ProcessConfig.Entrypoint+" "+strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
return ExecConfig.ID, nil
}
@ -215,7 +216,7 @@ func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.
logrus.Debugf("starting exec command %s in container %s", ec.ID, ec.Container.ID)
container := ec.Container
container.logEvent("exec_start: " + ec.ProcessConfig.Entrypoint + " " + strings.Join(ec.ProcessConfig.Arguments, " "))
d.LogContainerEvent(container, "exec_start: "+ec.ProcessConfig.Entrypoint+" "+strings.Join(ec.ProcessConfig.Arguments, " "))
if ec.OpenStdin {
r, w := io.Pipe()
@ -251,7 +252,7 @@ func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.
// the exitStatus) even after the cmd is done running.
go func() {
execErr <- container.exec(ec)
execErr <- d.containerExec(container, ec)
}()
select {
@ -329,3 +330,67 @@ func (d *Daemon) containerExecIds() map[string]struct{} {
}
return ids
}
func (d *Daemon) containerExec(container *Container, ec *ExecConfig) error {
container.Lock()
defer container.Unlock()
callback := func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
if processConfig.Tty {
// The callback is called after the process Start()
// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
// which we close here.
if c, ok := processConfig.Stdout.(io.Closer); ok {
c.Close()
}
}
close(ec.waitStart)
return nil
}
// We use a callback here instead of a goroutine and a channel for
// synchronization purposes
cErr := promise.Go(func() error { return d.monitorExec(container, ec, callback) })
// Exec should not return until the process is actually running
select {
case <-ec.waitStart:
case err := <-cErr:
return err
}
return nil
}
func (d *Daemon) monitorExec(container *Container, ExecConfig *ExecConfig, callback execdriver.DriverCallback) error {
var (
err error
exitCode int
)
pipes := execdriver.NewPipes(ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdout, ExecConfig.streamConfig.stderr, ExecConfig.OpenStdin)
exitCode, err = d.Exec(container, ExecConfig, pipes, callback)
if err != nil {
logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
}
logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
if ExecConfig.OpenStdin {
if err := ExecConfig.streamConfig.stdin.Close(); err != nil {
logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
}
}
if err := ExecConfig.streamConfig.stdout.Clean(); err != nil {
logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
}
if err := ExecConfig.streamConfig.stderr.Clean(); err != nil {
logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
}
if ExecConfig.ProcessConfig.Terminal != nil {
if err := ExecConfig.ProcessConfig.Terminal.Close(); err != nil {
logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
}
}
// remove the exec command from the container's store only and not the
// daemon's store so that the exec command can be inspected.
container.execCommands.Delete(ExecConfig.ID)
return err
}

View File

@ -4,6 +4,8 @@ import (
"io"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/ioutils"
)
// ContainerExport writes the contents of the container to the given
@ -14,7 +16,7 @@ func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
return err
}
data, err := container.export()
data, err := daemon.containerExport(container)
if err != nil {
return derr.ErrorCodeExportFailed.WithArgs(name, err)
}
@ -26,3 +28,27 @@ func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
}
return nil
}
func (daemon *Daemon) containerExport(container *Container) (archive.Archive, error) {
if err := daemon.Mount(container); err != nil {
return nil, err
}
uidMaps, gidMaps := daemon.GetUIDGIDMaps()
archive, err := archive.TarWithOptions(container.basefs, &archive.TarOptions{
Compression: archive.Uncompressed,
UIDMaps: uidMaps,
GIDMaps: gidMaps,
})
if err != nil {
daemon.Unmount(container)
return nil, err
}
arch := ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
daemon.Unmount(container)
return err
})
daemon.LogContainerEvent(container, "export")
return arch, err
}
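
The notable pattern in containerExport is deferring the unmount until the consumer closes the stream, via pkg/ioutils.NewReadCloserWrapper. A minimal standalone sketch of that wrapper (a local reimplementation for illustration, not the real package):

package main

import (
	"fmt"
	"io"
	"strings"
)

// readCloserWrapper ties a cleanup function to Close, mirroring the
// shape of pkg/ioutils.NewReadCloserWrapper used above.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

func newReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{Reader: r, closer: closer}
}

func main() {
	// Stand-ins for the tar stream and the unmount of the rootfs.
	stream := strings.NewReader("...tar bytes...")
	arch := newReadCloserWrapper(stream, func() error {
		fmt.Println("unmounting container rootfs")
		return nil
	})
	io.Copy(io.Discard, arch) // the caller consumes the export...
	arch.Close()              // ...and the unmount runs only on Close
}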

View File

@ -140,7 +140,7 @@ func (daemon *Daemon) getInspectData(container *Container, size bool) (*types.Co
sizeRootFs int64
)
if size {
sizeRw, sizeRootFs = container.getSize()
sizeRw, sizeRootFs = daemon.getSize(container)
contJSONBase.SizeRw = &sizeRw
contJSONBase.SizeRootFs = &sizeRootFs
}

View File

@ -4,7 +4,10 @@ import (
"fmt"
"runtime"
"syscall"
"time"
"github.com/Sirupsen/logrus"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/pkg/signal"
)
@ -24,14 +27,96 @@ func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
if err := container.Kill(); err != nil {
if err := daemon.Kill(container); err != nil {
return err
}
} else {
// Otherwise, just send the requested signal
if err := container.killSig(int(sig)); err != nil {
if err := daemon.killWithSignal(container, int(sig)); err != nil {
return err
}
}
return nil
}
// killWithSignal sends the container the given signal. This wrapper for the
// host specific kill command prepares the container before attempting
// to send the signal. An error is returned if the container is paused
// or not running, or if there is a problem returned from the
// underlying kill command.
func (daemon *Daemon) killWithSignal(container *Container, sig int) error {
logrus.Debugf("Sending %d to %s", sig, container.ID)
container.Lock()
defer container.Unlock()
// We could unpause the container for them rather than returning this error
if container.Paused {
return derr.ErrorCodeUnpauseContainer.WithArgs(container.ID)
}
if !container.Running {
return derr.ErrorCodeNotRunning.WithArgs(container.ID)
}
container.ExitOnNext()
// if the container is currently restarting we do not need to send the signal
// to the process. Telling the monitor that it should exit on its next event
// loop is enough
if container.Restarting {
return nil
}
if err := daemon.kill(container, sig); err != nil {
return err
}
daemon.LogContainerEvent(container, "kill")
return nil
}
// Kill forcefully terminates a container.
func (daemon *Daemon) Kill(container *Container) error {
if !container.IsRunning() {
return derr.ErrorCodeNotRunning.WithArgs(container.ID)
}
// 1. Send SIGKILL
if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil {
// While normally we might "return err" here we're not going to
// because if we can't stop the container by this point then
// it's probably because it's already stopped. Meaning, between
// the time of the IsRunning() call above and now it stopped.
// Also, since the err return will be exec driver specific we can't
// look for any particular (common) error that would indicate
// that the process is already dead vs something else going wrong.
// So, instead we'll give it up to 2 more seconds to complete and if
// by that time the container is still running, then the error
// we got is probably valid and so we return it to the caller.
if container.IsRunning() {
container.WaitStop(2 * time.Second)
if container.IsRunning() {
return err
}
}
}
// 2. Wait for the process to die; as a last resort, try to kill the process directly
if err := killProcessDirectly(container); err != nil {
return err
}
container.WaitStop(-1 * time.Second)
return nil
}
// killPossibleDeadProcess is a wrapper aroung killSig() suppressing "no such process" error.
func (daemon *Daemon) killPossiblyDeadProcess(container *Container, sig int) error {
err := daemon.killWithSignal(container, sig)
if err == syscall.ESRCH {
logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPID(), sig)
return nil
}
return err
}
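
killPossiblyDeadProcess exists because the daemon can race with a process that exits on its own between the state check and the kill. A minimal unix-only sketch of the same idea, treating ESRCH as success (illustrative helper name, not the real one):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

// killIgnoringDead sends sig to pid but treats "no such process"
// (ESRCH) as success: the process we raced with is simply gone.
func killIgnoringDead(pid int, sig syscall.Signal) error {
	if err := syscall.Kill(pid, sig); err != nil {
		if err == syscall.ESRCH {
			fmt.Printf("pid %d already gone, ignoring signal %d\n", pid, sig)
			return nil
		}
		return err
	}
	return nil
}

func main() {
	cmd := exec.Command("true")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	cmd.Wait() // the child has certainly exited by now
	if err := killIgnoringDead(cmd.Process.Pid, syscall.SIGKILL); err != nil {
		fmt.Println("unexpected error:", err)
	}
}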

View File

@ -369,7 +369,7 @@ func (daemon *Daemon) transformContainer(container *Container, ctx *listContext)
}
if ctx.Size {
sizeRw, sizeRootFs := container.getSize()
sizeRw, sizeRootFs := daemon.getSize(container)
newC.SizeRw = sizeRw
newC.SizeRootFs = sizeRootFs
}

View File

@ -7,6 +7,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/daemon/logger/jsonfilelog"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/pkg/stdcopy"
)
@ -48,7 +49,7 @@ func (daemon *Daemon) ContainerLogs(containerName string, config *ContainerLogsC
}
config.OutStream = outStream
cLog, err := container.getLogger()
cLog, err := daemon.getLogger(container)
if err != nil {
return err
}
@ -97,3 +98,42 @@ func (daemon *Daemon) ContainerLogs(containerName string, config *ContainerLogsC
}
}
}
func (daemon *Daemon) getLogger(container *Container) (logger.Logger, error) {
if container.logDriver != nil && container.IsRunning() {
return container.logDriver, nil
}
cfg := container.getLogConfig(daemon.defaultLogConfig)
if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil {
return nil, err
}
return container.StartLogger(cfg)
}
// StartLogging initializes and starts the container logging stream.
func (daemon *Daemon) StartLogging(container *Container) error {
cfg := container.getLogConfig(daemon.defaultLogConfig)
if cfg.Type == "none" {
return nil // do not start logging routines
}
if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil {
return err
}
l, err := container.StartLogger(cfg)
if err != nil {
return derr.ErrorCodeInitLogger.WithArgs(err)
}
copier := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l)
container.logCopier = copier
copier.Run()
container.logDriver = l
// set LogPath field only for json-file logdriver
if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok {
container.LogPath = jl.LogPath()
}
return nil
}
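
StartLogging wires the container's stdout/stderr pipes into the configured log driver through a copier. A standalone sketch of that fan-in pattern, with a print function standing in for the log driver:

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
	"sync"
)

// copyStreams reads each named stream line by line and hands the lines
// to the log sink tagged with their source, like logger.NewCopier above.
func copyStreams(srcs map[string]io.Reader, log func(source, line string)) {
	var wg sync.WaitGroup
	for name, r := range srcs {
		wg.Add(1)
		go func(name string, r io.Reader) {
			defer wg.Done()
			s := bufio.NewScanner(r)
			for s.Scan() {
				log(name, s.Text())
			}
		}(name, r)
	}
	wg.Wait()
}

func main() {
	copyStreams(map[string]io.Reader{
		"stdout": strings.NewReader("hello\nworld\n"),
		"stderr": strings.NewReader("oops\n"),
	}, func(src, line string) {
		fmt.Printf("%s: %s\n", src, line)
	})
}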

View File

@ -17,6 +17,20 @@ const (
loggerCloseTimeout = 10 * time.Second
)
// containerSupervisor defines the interface that a supervisor must implement
type containerSupervisor interface {
// LogContainerEvent generates events related to a given container
LogContainerEvent(*Container, string)
// Cleanup ensures that the container is properly unmounted
Cleanup(*Container)
// StartLogging starts the logging driver for the container
StartLogging(*Container) error
// Run starts a container
Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error)
// IsShuttingDown tells whether the supervisor is shutting down or not
IsShuttingDown() bool
}
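
This interface is what the rest of the diff is buying: the monitor talks to a small supervisor contract instead of a concrete *Daemon, so a stub can stand in during tests. A trimmed standalone sketch of the same decoupling (simplified method set, not the real signatures):

package main

import "fmt"

// supervisor is a simplified stand-in for containerSupervisor above.
type supervisor interface {
	LogContainerEvent(containerID, action string)
	IsShuttingDown() bool
}

type monitor struct {
	sup         supervisor
	containerID string
}

func (m *monitor) onExit() {
	m.sup.LogContainerEvent(m.containerID, "die")
	if m.sup.IsShuttingDown() {
		fmt.Println("daemon shutting down; not restarting")
	}
}

// stubSupervisor records events so a test can assert on them.
type stubSupervisor struct{ events []string }

func (s *stubSupervisor) LogContainerEvent(id, action string) {
	s.events = append(s.events, id+":"+action)
}
func (s *stubSupervisor) IsShuttingDown() bool { return true }

func main() {
	s := &stubSupervisor{}
	m := &monitor{sup: s, containerID: "abc123"}
	m.onExit()
	fmt.Println(s.events) // [abc123:die]
}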
// containerMonitor monitors the execution of a container's main process.
// If a restart policy is specified for the container the monitor will ensure that the
// process is restarted based on the rules of the policy. When the container is finally stopped
@ -25,6 +39,9 @@ const (
type containerMonitor struct {
mux sync.Mutex
// supervisor keeps track of the container and the events it generates
supervisor containerSupervisor
// container is the container being monitored
container *Container
@ -57,8 +74,9 @@ type containerMonitor struct {
// newContainerMonitor returns an initialized containerMonitor for the provided container
// honoring the provided restart policy
func newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *containerMonitor {
func (daemon *Daemon) newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *containerMonitor {
return &containerMonitor{
supervisor: daemon,
container: container,
restartPolicy: policy,
timeIncrement: defaultTimeIncrement,
@ -86,7 +104,7 @@ func (m *containerMonitor) ExitOnNext() {
// unmounts the container's root filesystem
func (m *containerMonitor) Close() error {
// Cleanup networking and mounts
m.container.cleanup()
m.supervisor.Cleanup(m.container)
// FIXME: there is a race condition between two RUN instructions in a Dockerfile
// because they share the same runconfig and change the image. Must be fixed
@ -130,7 +148,7 @@ func (m *containerMonitor) Start() error {
for {
m.container.RestartCount++
if err := m.container.startLogging(); err != nil {
if err := m.supervisor.StartLogging(m.container); err != nil {
m.resetContainer(false)
return err
@ -138,11 +156,11 @@ func (m *containerMonitor) Start() error {
pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)
m.container.logEvent("start")
m.logEvent("start")
m.lastStartTime = time.Now()
if exitStatus, err = m.container.daemon.run(m.container, pipes, m.callback); err != nil {
if exitStatus, err = m.supervisor.Run(m.container, pipes, m.callback); err != nil {
// if we receive an internal error from the initial start of a container then lets
// return it instead of entering the restart loop
if m.container.RestartCount == 0 {
@ -162,7 +180,7 @@ func (m *containerMonitor) Start() error {
if m.shouldRestart(exitStatus.ExitCode) {
m.container.setRestarting(&exitStatus)
m.container.logEvent("die")
m.logEvent("die")
m.resetContainer(true)
// sleep with a small time increment between each restart to help avoid issues caused by quickly
@ -177,7 +195,7 @@ func (m *containerMonitor) Start() error {
continue
}
m.container.logEvent("die")
m.logEvent("die")
m.resetContainer(true)
return err
}
@ -222,7 +240,7 @@ func (m *containerMonitor) shouldRestart(exitCode int) bool {
// do not restart if the user or docker has requested that this container be stopped
if m.shouldStop {
m.container.HasBeenManuallyStopped = !m.container.daemon.shutdown
m.container.HasBeenManuallyStopped = !m.supervisor.IsShuttingDown()
return false
}
@ -249,7 +267,7 @@ func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid
go func() {
_, ok := <-chOOM
if ok {
m.container.logEvent("oom")
m.logEvent("oom")
}
}()
@ -345,3 +363,7 @@ func (m *containerMonitor) resetContainer(lock bool) {
SysProcAttr: c.SysProcAttr,
}
}
func (m *containerMonitor) logEvent(action string) {
m.supervisor.LogContainerEvent(m.container, action)
}

46
daemon/mounts.go Normal file
View File

@ -0,0 +1,46 @@
package daemon
import (
"strings"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/volume/store"
)
func (daemon *Daemon) prepareMountPoints(container *Container) error {
for _, config := range container.MountPoints {
if len(config.Driver) > 0 {
v, err := daemon.createVolume(config.Name, config.Driver, nil)
if err != nil {
return err
}
config.Volume = v
}
}
return nil
}
func (daemon *Daemon) removeMountPoints(container *Container, rm bool) error {
var rmErrors []string
for _, m := range container.MountPoints {
if m.Volume == nil {
continue
}
daemon.volumes.Decrement(m.Volume)
if rm {
err := daemon.volumes.Remove(m.Volume)
// ErrVolumeInUse is ignored because having this
// volume being referenced by another container is
// not an error, but an implementation detail.
// This prevents docker from logging "ERROR: Volume in use"
// when there is another container using the volume.
if err != nil && err != store.ErrVolumeInUse {
rmErrors = append(rmErrors, err.Error())
}
}
}
if len(rmErrors) > 0 {
return derr.ErrorCodeRemovingVolume.WithArgs(strings.Join(rmErrors, "\n"))
}
return nil
}
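
removeMountPoints keeps going past individual failures and reports them as one combined error, so a single busy volume does not abort the rest of the cleanup. A minimal standalone sketch of that error-aggregation pattern:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// removeAll keeps removing every item, remembers the failures, and
// reports them as one combined error instead of stopping at the first.
func removeAll(names []string, remove func(string) error) error {
	var rmErrors []string
	for _, n := range names {
		if err := remove(n); err != nil {
			rmErrors = append(rmErrors, err.Error())
		}
	}
	if len(rmErrors) > 0 {
		return errors.New(strings.Join(rmErrors, "\n"))
	}
	return nil
}

func main() {
	err := removeAll([]string{"a", "b", "c"}, func(n string) error {
		if n == "b" {
			return fmt.Errorf("volume %s is busy", n)
		}
		return nil
	})
	fmt.Println(err) // volume b is busy
}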

View File

@ -134,7 +134,7 @@ func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName strin
if err != nil {
return err
}
return container.ConnectToNetwork(networkName)
return daemon.ConnectToNetwork(container, networkName)
}
// DisconnectContainerFromNetwork disconnects the given container from

View File

@ -11,9 +11,33 @@ func (daemon *Daemon) ContainerPause(name string) error {
return err
}
if err := container.pause(); err != nil {
if err := daemon.containerPause(container); err != nil {
return derr.ErrorCodePauseError.WithArgs(name, err)
}
return nil
}
// containerPause pauses the container execution without stopping the process.
// The execution can be resumed by calling containerUnpause.
func (daemon *Daemon) containerPause(container *Container) error {
container.Lock()
defer container.Unlock()
// We cannot pause the container which is not running
if !container.Running {
return derr.ErrorCodeNotRunning.WithArgs(container.ID)
}
// We cannot pause the container which is already paused
if container.Paused {
return derr.ErrorCodeAlreadyPaused.WithArgs(container.ID)
}
if err := daemon.execDriver.Pause(container.command); err != nil {
return err
}
container.Paused = true
daemon.LogContainerEvent(container, "pause")
return nil
}

View File

@ -1,10 +1,11 @@
package daemon
import (
"strings"
"github.com/Sirupsen/logrus"
derr "github.com/docker/docker/errors"
"github.com/docker/libnetwork"
"strings"
)
// ContainerRename changes the name of a container, using the oldName
@ -54,7 +55,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
}
if !container.Running {
container.logEvent("rename")
daemon.LogContainerEvent(container, "rename")
return nil
}
@ -78,6 +79,6 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
return err
}
container.logEvent("rename")
daemon.LogContainerEvent(container, "rename")
return nil
}

View File

@ -8,7 +8,10 @@ func (daemon *Daemon) ContainerResize(name string, height, width int) error {
return err
}
return container.Resize(height, width)
if err = container.Resize(height, width); err == nil {
daemon.LogContainerEvent(container, "resize")
}
return err
}
// ContainerExecResize changes the size of the TTY of the process

View File

@ -15,8 +15,32 @@ func (daemon *Daemon) ContainerRestart(name string, seconds int) error {
if err != nil {
return err
}
if err := container.Restart(seconds); err != nil {
if err := daemon.containerRestart(container, seconds); err != nil {
return derr.ErrorCodeCantRestart.WithArgs(name, err)
}
return nil
}
// containerRestart attempts to gracefully stop and then start the
// container. When stopping, wait for the given duration in seconds to
// gracefully stop, before forcefully terminating the container. If
// given a negative duration, wait forever for a graceful stop.
func (daemon *Daemon) containerRestart(container *Container, seconds int) error {
// Avoid unnecessarily unmounting and then directly mounting
// the container when the container stops and then starts
// again
if err := daemon.Mount(container); err == nil {
defer daemon.Unmount(container)
}
if err := daemon.containerStop(container, seconds); err != nil {
return err
}
if err := daemon.containerStart(container); err != nil {
return err
}
daemon.LogContainerEvent(container, "restart")
return nil
}
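
The Mount/Unmount pair around the stop/start above relies on the mount layer being reference counted, so holding an extra reference across the restart turns the stop's unmount and the start's mount into no-ops. A hypothetical counter makes the effect visible (the refcounting here is an assumption the sketch itself supplies, standing in for the graph driver):

package main

import "fmt"

// refCounter is a hypothetical stand-in for a reference counted mount:
// only the first Mount and the last Unmount touch the real filesystem.
type refCounter struct{ n int }

func (r *refCounter) Mount() {
	r.n++
	if r.n == 1 {
		fmt.Println("real mount")
	}
}

func (r *refCounter) Unmount() {
	r.n--
	if r.n == 0 {
		fmt.Println("real unmount")
	}
}

func main() {
	fs := &refCounter{}
	fs.Mount() // the running container holds one reference

	// restart:
	fs.Mount()   // take an extra reference up front
	fs.Unmount() // stop releases the container's reference: no real unmount
	fs.Mount()   // start takes it again: no real mount either
	fs.Unmount() // restart releases its extra reference

	fmt.Println("references held:", fs.n) // 1: the container is running again
}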

View File

@ -3,7 +3,9 @@ package daemon
import (
"runtime"
"github.com/Sirupsen/logrus"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/pkg/promise"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
)
@ -44,9 +46,116 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConf
return err
}
if err := container.Start(); err != nil {
if err := daemon.containerStart(container); err != nil {
return derr.ErrorCodeCantStart.WithArgs(name, utils.GetErrorMessage(err))
}
return nil
}
// Start starts a container
func (daemon *Daemon) Start(container *Container) error {
return daemon.containerStart(container)
}
// containerStart prepares the container to run by setting up everything the
// container needs, such as storage and networking, as well as links
// between containers. The container is left waiting for a signal to
// begin running.
func (daemon *Daemon) containerStart(container *Container) (err error) {
container.Lock()
defer container.Unlock()
if container.Running {
return nil
}
if container.removalInProgress || container.Dead {
return derr.ErrorCodeContainerBeingRemoved
}
// if we encounter an error during start we need to ensure that any other
// setup has been cleaned up properly
defer func() {
if err != nil {
container.setError(err)
// if no one else has set it, make sure we don't leave it at zero
if container.ExitCode == 0 {
container.ExitCode = 128
}
container.toDisk()
daemon.Cleanup(container)
daemon.LogContainerEvent(container, "die")
}
}()
if err := daemon.conditionalMountOnStart(container); err != nil {
return err
}
// Make sure NetworkMode has an acceptable value. We do this to ensure
// backwards API compatibility.
container.hostConfig = runconfig.SetDefaultNetModeIfBlank(container.hostConfig)
if err := daemon.initializeNetworking(container); err != nil {
return err
}
linkedEnv, err := daemon.setupLinkedContainers(container)
if err != nil {
return err
}
if err := container.setupWorkingDirectory(); err != nil {
return err
}
env := container.createDaemonEnvironment(linkedEnv)
if err := daemon.populateCommand(container, env); err != nil {
return err
}
if !container.hostConfig.IpcMode.IsContainer() && !container.hostConfig.IpcMode.IsHost() {
if err := daemon.setupIpcDirs(container); err != nil {
return err
}
}
mounts, err := daemon.setupMounts(container)
if err != nil {
return err
}
mounts = append(mounts, container.ipcMounts()...)
container.command.Mounts = mounts
return daemon.waitForStart(container)
}
func (daemon *Daemon) waitForStart(container *Container) error {
container.monitor = daemon.newContainerMonitor(container, container.hostConfig.RestartPolicy)
// block until we either receive an error from the initial start of the container's
// process or until the process is running in the container
select {
case <-container.monitor.startSignal:
case err := <-promise.Go(container.monitor.Start):
return err
}
return nil
}
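
waitForStart blocks on whichever happens first: the monitor signalling that the process is up, or the start attempt failing. pkg/promise.Go is simply "run f in a goroutine and expose its eventual error on a channel"; here is a standalone sketch with a local promiseGo stand-in:

package main

import (
	"errors"
	"fmt"
	"time"
)

// promiseGo is a local stand-in for pkg/promise.Go.
func promiseGo(f func() error) chan error {
	ch := make(chan error, 1)
	go func() { ch <- f() }()
	return ch
}

func main() {
	startSignal := make(chan struct{})

	start := func() error {
		time.Sleep(50 * time.Millisecond) // pretend to exec the process
		close(startSignal)                // it is now running
		time.Sleep(time.Second)           // keep monitoring it
		return errors.New("process exited")
	}

	// Block until the process is running or its startup fails.
	select {
	case <-startSignal:
		fmt.Println("container is running")
	case err := <-promiseGo(start):
		fmt.Println("failed to start:", err)
	}
}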
// Cleanup releases any network resources allocated to the container along with any rules
// around how containers are linked together. It also unmounts the container's root filesystem.
func (daemon *Daemon) Cleanup(container *Container) {
daemon.releaseNetwork(container)
container.unmountIpcMounts(detachMounted)
daemon.conditionalUnmountOnCleanup(container)
for _, eConfig := range container.execCommands.s {
daemon.unregisterExecCommand(eConfig)
}
if err := container.unmountVolumes(false); err != nil {
logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
}
}

View File

@ -134,7 +134,7 @@ func (s *State) waitRunning(timeout time.Duration) (int, error) {
if err := wait(waitChan, timeout); err != nil {
return -1, err
}
return s.getPID(), nil
return s.GetPID(), nil
}
// WaitStop waits until state is stopped. If state already stopped it returns
@ -164,7 +164,7 @@ func (s *State) IsRunning() bool {
}
// GetPID returns the process id of a container.
func (s *State) getPID() int {
func (s *State) GetPID() int {
s.Lock()
res := s.Pid
s.Unlock()

View File

@ -14,18 +14,22 @@ import (
"github.com/docker/docker/daemon/execdriver"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/pkg/pubsub"
lntypes "github.com/docker/libnetwork/types"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/system"
)
type statsSupervisor interface {
// GetContainerStats collects all the stats related to a container
GetContainerStats(container *Container) (*execdriver.ResourceStats, error)
}
// newStatsCollector returns a new statsCollector that collects
// network and cgroup stats for a registered container at the specified
// interval. The collector allows non-running containers to be added
// and will start processing stats when they are started.
func newStatsCollector(interval time.Duration) *statsCollector {
func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector {
s := &statsCollector{
interval: interval,
supervisor: daemon,
publishers: make(map[*Container]*pubsub.Publisher),
clockTicksPerSecond: uint64(system.GetClockTicks()),
bufReader: bufio.NewReaderSize(nil, 128),
@ -37,6 +41,7 @@ func newStatsCollector(interval time.Duration) *statsCollector {
// statsCollector manages and provides container resource stats
type statsCollector struct {
m sync.Mutex
supervisor statsSupervisor
interval time.Duration
clockTicksPerSecond uint64
publishers map[*Container]*pubsub.Publisher
@ -112,7 +117,7 @@ func (s *statsCollector) run() {
}
for _, pair := range pairs {
stats, err := pair.container.stats()
stats, err := s.supervisor.GetContainerStats(pair.container)
if err != nil {
if err != execdriver.ErrNotRunning {
logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err)
@ -121,10 +126,6 @@ func (s *statsCollector) run() {
}
stats.SystemUsage = systemUsage
// Retrieve the nw statistics from libnetwork and inject them in the Stats
if nwStats, err := s.getNetworkStats(pair.container); err == nil {
stats.Interfaces = nwStats
}
pair.publisher.Publish(stats)
}
}
@ -177,37 +178,3 @@ func (s *statsCollector) getSystemCPUUsage() (uint64, error) {
}
return 0, derr.ErrorCodeBadStatFormat
}
func (s *statsCollector) getNetworkStats(c *Container) ([]*libcontainer.NetworkInterface, error) {
var list []*libcontainer.NetworkInterface
sb, err := c.daemon.netController.SandboxByID(c.NetworkSettings.SandboxID)
if err != nil {
return list, err
}
stats, err := sb.Statistics()
if err != nil {
return list, err
}
// Convert libnetwork nw stats into libcontainer nw stats
for ifName, ifStats := range stats {
list = append(list, convertLnNetworkStats(ifName, ifStats))
}
return list, nil
}
func convertLnNetworkStats(name string, stats *lntypes.InterfaceStatistics) *libcontainer.NetworkInterface {
n := &libcontainer.NetworkInterface{Name: name}
n.RxBytes = stats.RxBytes
n.RxPackets = stats.RxPackets
n.RxErrors = stats.RxErrors
n.RxDropped = stats.RxDropped
n.TxBytes = stats.TxBytes
n.TxPackets = stats.TxPackets
n.TxErrors = stats.TxErrors
n.TxDropped = stats.TxDropped
return n
}

View File

@ -6,7 +6,7 @@ import "time"
// for a registered container at the specified interval. The collector allows
// non-running containers to be added and will start processing stats when
// they are started.
func newStatsCollector(interval time.Duration) *statsCollector {
func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector {
return &statsCollector{}
}

View File

@ -1,6 +1,9 @@
package daemon
import (
"time"
"github.com/Sirupsen/logrus"
derr "github.com/docker/docker/errors"
)
@ -18,8 +21,40 @@ func (daemon *Daemon) ContainerStop(name string, seconds int) error {
if !container.IsRunning() {
return derr.ErrorCodeStopped
}
if err := container.Stop(seconds); err != nil {
if err := daemon.containerStop(container, seconds); err != nil {
return derr.ErrorCodeCantStop.WithArgs(name, err)
}
return nil
}
// containerStop halts a container by sending a stop signal, waiting for the given
// duration in seconds, and then sending SIGKILL and waiting for the
// process to exit. If a negative duration is given, containerStop will wait
// for the initial signal forever. If the container is not running, it returns
// immediately.
func (daemon *Daemon) containerStop(container *Container, seconds int) error {
if !container.IsRunning() {
return nil
}
// 1. Send a SIGTERM
if err := daemon.killPossiblyDeadProcess(container, container.stopSignal()); err != nil {
logrus.Infof("Failed to send SIGTERM to the process, force killing")
if err := daemon.killPossiblyDeadProcess(container, 9); err != nil {
return err
}
}
// 2. Wait for the process to exit on its own
if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil {
logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
// 3. If it doesn't, then send SIGKILL
if err := daemon.Kill(container); err != nil {
container.WaitStop(-1 * time.Second)
logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it
}
}
daemon.LogContainerEvent(container, "stop")
return nil
}
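
The stop sequence above is the classic escalation: polite signal, grace period, SIGKILL. A standalone unix sketch of the same flow driving an ordinary child process instead of a container (sleep exits on SIGTERM, so the graceful branch fires; a process ignoring SIGTERM would hit step 3):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	// 1. Send the polite stop signal.
	cmd.Process.Signal(syscall.SIGTERM)

	// 2. Wait for the process to exit on its own, with a grace period.
	select {
	case <-done:
		fmt.Println("exited after SIGTERM")
	case <-time.After(2 * time.Second):
		// 3. It did not exit in time: SIGKILL cannot be caught or ignored.
		cmd.Process.Kill()
		<-done
		fmt.Println("killed after grace period")
	}
}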

View File

@ -76,6 +76,6 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container
}
}
}
container.logEvent("top")
daemon.LogContainerEvent(container, "top")
return procList, nil
}

View File

@ -11,9 +11,33 @@ func (daemon *Daemon) ContainerUnpause(name string) error {
return err
}
if err := container.unpause(); err != nil {
if err := daemon.containerUnpause(container); err != nil {
return derr.ErrorCodeCantUnpause.WithArgs(name, err)
}
return nil
}
// containerUnpause resumes the container execution after the container is paused.
func (daemon *Daemon) containerUnpause(container *Container) error {
container.Lock()
defer container.Unlock()
// We cannot unpause the container which is not running
if !container.Running {
return derr.ErrorCodeNotRunning.WithArgs(container.ID)
}
// We cannot unpause the container which is not paused
if !container.Paused {
return derr.ErrorCodeNotPaused.WithArgs(container.ID)
}
if err := daemon.execDriver.Unpause(container.command); err != nil {
return err
}
container.Paused = false
daemon.LogContainerEvent(container, "unpause")
return nil
}
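
Pause and unpause share a shape: lock the container, validate the state transition, delegate to the exec driver, flip the flag, emit the event. A condensed standalone sketch of that guard pattern, with a stub freeze function in place of the driver:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type container struct {
	sync.Mutex
	Running bool
	Paused  bool
}

// pause locks the container, checks preconditions, delegates to the
// driver stub, then records the new state.
func pause(c *container, freeze func() error) error {
	c.Lock()
	defer c.Unlock()
	if !c.Running {
		return errors.New("container is not running")
	}
	if c.Paused {
		return errors.New("container is already paused")
	}
	if err := freeze(); err != nil {
		return err
	}
	c.Paused = true
	fmt.Println("event: pause")
	return nil
}

func main() {
	c := &container{Running: true}
	fmt.Println(pause(c, func() error { return nil })) // <nil>
	fmt.Println(pause(c, func() error { return nil })) // already paused
}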

View File

@ -58,7 +58,7 @@ func copyOwnership(source, destination string) error {
// setupMounts iterates through each of the mount points for a container and
// calls Setup() on each. It also looks to see if it is a network mount such as
// /etc/resolv.conf, and if it is not, appends it to the array of mounts.
func (container *Container) setupMounts() ([]execdriver.Mount, error) {
func (daemon *Daemon) setupMounts(container *Container) ([]execdriver.Mount, error) {
var mounts []execdriver.Mount
for _, m := range container.MountPoints {
path, err := m.Setup()
@ -79,7 +79,7 @@ func (container *Container) setupMounts() ([]execdriver.Mount, error) {
// if we are going to mount any of the network files from container
// metadata, the ownership must be set properly for potential container
// remapped root (user namespaces)
rootUID, rootGID := container.daemon.GetRemappedUIDGID()
rootUID, rootGID := daemon.GetRemappedUIDGID()
for _, mount := range netMounts {
if err := os.Chown(mount.Source, rootUID, rootGID); err != nil {
return nil, err

View File

@ -3,17 +3,18 @@
package daemon
import (
"sort"
"github.com/docker/docker/daemon/execdriver"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/volume"
"sort"
)
// setupMounts configures the mount points for a container by appending each
// of the configured mounts on the container to the execdriver mount structure
// which will ultimately be passed into the exec driver during container creation.
// It also ensures the mounts are lexicographically sorted.
func (container *Container) setupMounts() ([]execdriver.Mount, error) {
func (daemon *Daemon) setupMounts(container *Container) ([]execdriver.Mount, error) {
var mnts []execdriver.Mount
for _, mount := range container.MountPoints { // type is volume.MountPoint
// If there is no source, take it from the volume path

View File

@ -66,6 +66,15 @@ func (n NetworkMode) IsNone() bool {
return n == "none"
}
// ConnectedContainer returns the id of the container whose network this container is connected to.
func (n NetworkMode) ConnectedContainer() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
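
For a mode string like "container:abc123", SplitN on the first colon yields the target id; anything without a colon yields the empty string. A standalone check of the same parse:

package main

import (
	"fmt"
	"strings"
)

// connectedContainer replicates the parse in NetworkMode.ConnectedContainer.
func connectedContainer(mode string) string {
	parts := strings.SplitN(mode, ":", 2)
	if len(parts) > 1 {
		return parts[1]
	}
	return ""
}

func main() {
	fmt.Println(connectedContainer("container:abc123")) // abc123
	fmt.Println(connectedContainer("bridge"))           // empty string
}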
// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()