
Remove further references to the daemon within containers.

Signed-off-by: David Calavera <david.calavera@gmail.com>
David Calavera 2015-11-03 14:25:22 -05:00
parent 444c82d19d
commit 63efc12070
15 changed files with 49 additions and 39 deletions
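
Every hunk below applies the same refactoring: behaviour that used to hang off Container and reach back through a container.daemon pointer moves onto Daemon, which now receives the container as an argument. A minimal sketch of the shape of that change, using simplified stand-in types rather than the real moby structs:

package main

import "fmt"

// Container and Daemon are deliberately simplified stand-ins for the moby
// types; the fields here are invented for illustration only.
type Container struct {
	ID string
}

type Daemon struct {
	volumeDriver string
}

// Before this commit the behaviour lived on Container and reached back
// through a container.daemon pointer. After it, the Daemon owns the method
// and receives the container it acts on, so Container no longer needs a
// reference to the Daemon at all.
func (d *Daemon) mountVolumes(c *Container) error {
	fmt.Printf("mounting volumes for %s with driver %s\n", c.ID, d.volumeDriver)
	return nil
}

func main() {
	d := &Daemon{volumeDriver: "local"}
	c := &Container{ID: "abc123"}
	if err := d.mountVolumes(c); err != nil {
		fmt.Println(err)
	}
}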

View file

@@ -142,7 +142,7 @@ func (daemon *Daemon) containerStatPath(container *Container, path string) (stat
 }
 defer daemon.Unmount(container)
-err = container.mountVolumes()
+err = daemon.mountVolumes(container)
 defer container.unmountVolumes(true)
 if err != nil {
 return nil, err
@@ -184,7 +184,7 @@ func (daemon *Daemon) containerArchivePath(container *Container, path string) (c
 }
 }()
-if err = container.mountVolumes(); err != nil {
+if err = daemon.mountVolumes(container); err != nil {
 return nil, nil, err
 }
@@ -239,7 +239,7 @@ func (daemon *Daemon) containerExtractToDir(container *Container, path string, n
 }
 defer daemon.Unmount(container)
-err = container.mountVolumes()
+err = daemon.mountVolumes(container)
 defer container.unmountVolumes(true)
 if err != nil {
 return err
@@ -348,7 +348,7 @@ func (daemon *Daemon) containerCopy(container *Container, resource string) (rc i
 }
 }()
-if err := container.mountVolumes(); err != nil {
+if err := daemon.mountVolumes(container); err != nil {
 return nil, err
 }

View file

@@ -498,8 +498,8 @@ func (container *Container) shouldRestart() bool {
 (container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0)
 }
-func (container *Container) mountVolumes() error {
-mounts, err := container.setupMounts()
+func (daemon *Daemon) mountVolumes(container *Container) error {
+mounts, err := daemon.setupMounts(container)
 if err != nil {
 return err
 }

View file

@@ -744,14 +744,14 @@ func (daemon *Daemon) updateNetworkSettings(container *Container, n libnetwork.N
 return nil
 }
-func (container *Container) updateEndpointNetworkSettings(n libnetwork.Network, ep libnetwork.Endpoint) error {
+func (daemon *Daemon) updateEndpointNetworkSettings(container *Container, n libnetwork.Network, ep libnetwork.Endpoint) error {
 networkSettings, err := container.buildEndpointInfo(n, ep, container.NetworkSettings)
 if err != nil {
 return err
 }
 if container.hostConfig.NetworkMode == runconfig.NetworkMode("bridge") {
-networkSettings.Bridge = container.daemon.configStore.Bridge.Iface
+networkSettings.Bridge = daemon.configStore.Bridge.Iface
 }
 return nil
@@ -1006,7 +1006,7 @@ func (daemon *Daemon) connectToNetwork(container *Container, idOrName string, up
 }
 }()
-if err := container.updateEndpointNetworkSettings(n, ep); err != nil {
+if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil {
 return err
 }
@@ -1075,13 +1075,13 @@ func (daemon *Daemon) initializeNetworking(container *Container) error {
 // called from the libcontainer pre-start hook to set the network
 // namespace configuration linkage to the libnetwork "sandbox" entity
-func (daemon *Daemon) setNetworkNamespaceKey(containerId string, pid int) error {
+func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error {
 path := fmt.Sprintf("/proc/%d/ns/net", pid)
 var sandbox libnetwork.Sandbox
-search := libnetwork.SandboxContainerWalker(&sandbox, containerId)
+search := libnetwork.SandboxContainerWalker(&sandbox, containerID)
 daemon.netController.WalkSandboxes(search)
 if sandbox == nil {
-return derr.ErrorCodeNoSandbox.WithArgs(containerId)
+return derr.ErrorCodeNoSandbox.WithArgs(containerID)
 }
 return sandbox.SetKey(path)
@@ -1126,16 +1126,16 @@ func (container *Container) setupWorkingDirectory() error {
 return nil
 }
-func (daemon *Daemon) getNetworkedContainer(containerId, connectedContainerId string) (*Container, error) {
-nc, err := daemon.Get(connectedContainerId)
+func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*Container, error) {
+nc, err := daemon.Get(connectedContainerID)
 if err != nil {
 return nil, err
 }
-if containerId == nc.ID {
+if containerID == nc.ID {
 return nil, derr.ErrorCodeJoinSelf
 }
 if !nc.IsRunning() {
-return nil, derr.ErrorCodeJoinRunning.WithArgs(connectedContainerId)
+return nil, derr.ErrorCodeJoinRunning.WithArgs(connectedContainerID)
 }
 return nc, nil
 }

View file

@@ -149,7 +149,7 @@ func (daemon *Daemon) getSize(container *Container) (int64, int64) {
 }
 // setNetworkNamespaceKey is a no-op on Windows.
-func (daemon *Daemon) setNetworkNamespaceKey(containerId string, pid int) error {
+func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error {
 return nil
 }

View file

@@ -119,7 +119,7 @@ func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *Container, re
 }
 defer daemon.Unmount(container)
-if err := createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig, img); err != nil {
+if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig, img); err != nil {
 return nil, err
 }

View file

@@ -15,7 +15,7 @@ import (
 )
 // createContainerPlatformSpecificSettings performs platform specific container create functionality
-func createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
+func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
 for spec := range config.Volumes {
 name := stringid.GenerateNonCryptoID()
 destination := filepath.Clean(spec)
@@ -45,7 +45,7 @@ func createContainerPlatformSpecificSettings(container *Container, config *runco
 }
 }
-v, err := container.daemon.createVolume(name, volumeDriver, nil)
+v, err := daemon.createVolume(name, volumeDriver, nil)
 if err != nil {
 return err
 }

View file

@@ -10,7 +10,7 @@ import (
 )
 // createContainerPlatformSpecificSettings performs platform specific container create functionality
-func createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
+func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
 for spec := range config.Volumes {
 mp, err := volume.ParseMountSpec(spec, hostConfig.VolumeDriver)
@@ -41,7 +41,7 @@ func createContainerPlatformSpecificSettings(container *Container, config *runco
 // Create the volume in the volume driver. If it doesn't exist,
 // a new one will be created.
-v, err := container.daemon.createVolume(mp.Name, volumeDriver, nil)
+v, err := daemon.createVolume(mp.Name, volumeDriver, nil)
 if err != nil {
 return err
 }

View file

@@ -194,7 +194,7 @@ func (daemon *Daemon) load(id string) (*Container, error) {
 // Register makes a container object usable by the daemon as <container.ID>
 func (daemon *Daemon) Register(container *Container) error {
-if container.daemon != nil || daemon.Exists(container.ID) {
+if daemon.Exists(container.ID) {
 return fmt.Errorf("Container is already loaded")
 }
 if err := validateID(container.ID); err != nil {
@@ -204,8 +204,6 @@ func (daemon *Daemon) Register(container *Container) error {
 return err
 }
-container.daemon = daemon
-
 // Attach to stdout and stderr
 container.stderr = new(broadcaster.Unbuffered)
 container.stdout = new(broadcaster.Unbuffered)
@@ -954,7 +952,8 @@ func (daemon *Daemon) Unmount(container *Container) error {
 return daemon.driver.Put(container.ID)
 }
-func (daemon *Daemon) run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
+// Run uses the execution driver to run a given container
+func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
 hooks := execdriver.Hooks{
 Start: startCallback,
 }
@@ -1303,6 +1302,12 @@ func (daemon *Daemon) SearchRegistryForImages(term string,
 return daemon.RegistryService.Search(term, authConfig, headers)
 }
+// IsShuttingDown tells whether the daemon is shutting down or not
+func (daemon *Daemon) IsShuttingDown() bool {
+return daemon.shutdown
+}
+
+// GetContainerStats collects all the stats published by a container
 func (daemon *Daemon) GetContainerStats(container *Container) (*execdriver.ResourceStats, error) {
 stats, err := daemon.stats(container)
 if err != nil {

View file

@@ -331,7 +331,7 @@ func (d *Daemon) containerExecIds() map[string]struct{} {
 return ids
 }
-func (daemon *Daemon) containerExec(container *Container, ec *ExecConfig) error {
+func (d *Daemon) containerExec(container *Container, ec *ExecConfig) error {
 container.Lock()
 defer container.Unlock()
@@ -350,7 +350,7 @@ func (daemon *Daemon) containerExec(container *Container, ec *ExecConfig) error
 // We use a callback here instead of a goroutine and an chan for
 // synchronization purposes
-cErr := promise.Go(func() error { return daemon.monitorExec(container, ec, callback) })
+cErr := promise.Go(func() error { return d.monitorExec(container, ec, callback) })
 // Exec should not return until the process is actually running
 select {
@@ -362,13 +362,13 @@ func (daemon *Daemon) containerExec(container *Container, ec *ExecConfig) error
 return nil
 }
-func (daemon *Daemon) monitorExec(container *Container, ExecConfig *ExecConfig, callback execdriver.DriverCallback) error {
+func (d *Daemon) monitorExec(container *Container, ExecConfig *ExecConfig, callback execdriver.DriverCallback) error {
 var (
 err error
 exitCode int
 )
 pipes := execdriver.NewPipes(ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdout, ExecConfig.streamConfig.stderr, ExecConfig.OpenStdin)
-exitCode, err = daemon.Exec(container, ExecConfig, pipes, callback)
+exitCode, err = d.Exec(container, ExecConfig, pipes, callback)
 if err != nil {
 logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
 }

View file

@@ -25,6 +25,10 @@ type containerSupervisor interface {
 Cleanup(*Container)
 // StartLogging starts the logging driver for the container
 StartLogging(*Container) error
+// Run starts a container
+Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error)
+// IsShuttingDown tells whether the supervisor is shutting down or not
+IsShuttingDown() bool
 }
 // containerMonitor monitors the execution of a container's main process.
@@ -156,7 +160,7 @@ func (m *containerMonitor) Start() error {
 m.lastStartTime = time.Now()
-if exitStatus, err = m.container.daemon.run(m.container, pipes, m.callback); err != nil {
+if exitStatus, err = m.supervisor.Run(m.container, pipes, m.callback); err != nil {
 // if we receive an internal error from the initial start of a container then lets
 // return it instead of entering the restart loop
 if m.container.RestartCount == 0 {
@@ -236,7 +240,7 @@ func (m *containerMonitor) shouldRestart(exitCode int) bool {
 // do not restart if the user or docker has requested that this container be stopped
 if m.shouldStop {
-m.container.HasBeenManuallyStopped = !m.container.daemon.shutdown
+m.container.HasBeenManuallyStopped = !m.supervisor.IsShuttingDown()
 return false
 }
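
The Run and IsShuttingDown methods added to containerSupervisor above are what let the monitor drop its m.container.daemon references. A rough sketch, with invented and heavily simplified types rather than the real moby signatures, of how a monitor that depends only on such a narrow interface can be driven by any supervisor, including a fake one:

package main

import "fmt"

// Simplified, invented stand-ins; the real moby types carry far more state.
type Container struct{ ID string }
type ExitStatus struct{ ExitCode int }

// A narrow supervisor interface in the spirit of containerSupervisor: the
// monitor only needs Run and IsShuttingDown, not the whole concrete Daemon.
type supervisor interface {
	Run(c *Container) (ExitStatus, error)
	IsShuttingDown() bool
}

type monitor struct {
	container  *Container
	supervisor supervisor
}

// start runs the container once and decides whether a restart makes sense,
// consulting the supervisor instead of a daemon back-pointer.
func (m *monitor) start() error {
	status, err := m.supervisor.Run(m.container)
	if err != nil {
		return err
	}
	if m.supervisor.IsShuttingDown() {
		fmt.Println("supervisor shutting down, not restarting", m.container.ID)
		return nil
	}
	fmt.Printf("container %s exited with code %d\n", m.container.ID, status.ExitCode)
	return nil
}

// fakeSupervisor shows the payoff of the narrow interface: the monitor can be
// exercised without constructing a daemon at all.
type fakeSupervisor struct{ shuttingDown bool }

func (f *fakeSupervisor) Run(c *Container) (ExitStatus, error) {
	return ExitStatus{ExitCode: 0}, nil
}

func (f *fakeSupervisor) IsShuttingDown() bool { return f.shuttingDown }

func main() {
	m := &monitor{
		container:  &Container{ID: "abc123"},
		supervisor: &fakeSupervisor{},
	}
	if err := m.start(); err != nil {
		fmt.Println(err)
	}
}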

View file

@@ -26,7 +26,7 @@ func (daemon *Daemon) removeMountPoints(container *Container, rm bool) error {
 if m.Volume == nil {
 continue
 }
-container.daemon.volumes.Decrement(m.Volume)
+daemon.volumes.Decrement(m.Volume)
 if rm {
 err := daemon.volumes.Remove(m.Volume)
 // ErrVolumeInUse is ignored because having this

View file

@@ -118,7 +118,7 @@ func (daemon *Daemon) containerStart(container *Container) (err error) {
 }
 }
-mounts, err := container.setupMounts()
+mounts, err := daemon.setupMounts(container)
 if err != nil {
 return err
 }

View file

@@ -6,7 +6,7 @@ import "time"
 // for a registered container at the specified interval. The collector allows
 // non-running containers to be added and will start processing stats when
 // they are started.
-func newStatsCollector(interval time.Duration) *statsCollector {
+func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector {
 return &statsCollector{}
 }

View file

@@ -58,7 +58,7 @@ func copyOwnership(source, destination string) error {
 // setupMounts iterates through each of the mount points for a container and
 // calls Setup() on each. It also looks to see if is a network mount such as
 // /etc/resolv.conf, and if it is not, appends it to the array of mounts.
-func (container *Container) setupMounts() ([]execdriver.Mount, error) {
+func (daemon *Daemon) setupMounts(container *Container) ([]execdriver.Mount, error) {
 var mounts []execdriver.Mount
 for _, m := range container.MountPoints {
 path, err := m.Setup()
@@ -79,7 +79,7 @@ func (container *Container) setupMounts() ([]execdriver.Mount, error) {
 // if we are going to mount any of the network files from container
 // metadata, the ownership must be set properly for potential container
 // remapped root (user namespaces)
-rootUID, rootGID := container.daemon.GetRemappedUIDGID()
+rootUID, rootGID := daemon.GetRemappedUIDGID()
 for _, mount := range netMounts {
 if err := os.Chown(mount.Source, rootUID, rootGID); err != nil {
 return nil, err

View file

@@ -3,17 +3,18 @@
 package daemon
 import (
+"sort"
+
 "github.com/docker/docker/daemon/execdriver"
 derr "github.com/docker/docker/errors"
 "github.com/docker/docker/volume"
-"sort"
 )
 // setupMounts configures the mount points for a container by appending each
 // of the configured mounts on the container to the execdriver mount structure
 // which will ultimately be passed into the exec driver during container creation.
 // It also ensures each of the mounts are lexographically sorted.
-func (container *Container) setupMounts() ([]execdriver.Mount, error) {
+func (daemon *Daemon) setupMounts(container *Container) ([]execdriver.Mount, error) {
 var mnts []execdriver.Mount
 for _, mount := range container.MountPoints { // type is volume.MountPoint
 // If there is no source, take it from the volume path