daemon: rename all receivers to "daemon"
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
parent 5d040cbd16
commit 797ec8e913
6 changed files with 46 additions and 46 deletions

daemon/debugtrap_unix.go

@@ -11,7 +11,7 @@ import (
 	"golang.org/x/sys/unix"
 )
 
-func (d *Daemon) setupDumpStackTrap(root string) {
+func (daemon *Daemon) setupDumpStackTrap(root string) {
 	c := make(chan os.Signal, 1)
 	signal.Notify(c, unix.SIGUSR1)
 	go func() {
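
The hunk above only renames the receiver; the trap itself is the stock Go signal idiom. A minimal, self-contained sketch of the same idiom follows — illustration only, not the daemon's handler, which (not shown in this hunk) uses the root argument to write the dump to a file rather than stderr:

package main

import (
	"os"
	"os/signal"
	"runtime"
	"syscall"
)

func main() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGUSR1) // the daemon traps the same signal via unix.SIGUSR1
	go func() {
		for range c {
			// Collect the stacks of all goroutines, growing the buffer until they fit.
			buf := make([]byte, 64*1024)
			for {
				n := runtime.Stack(buf, true)
				if n < len(buf) {
					buf = buf[:n]
					break
				}
				buf = make([]byte, 2*len(buf))
			}
			os.Stderr.Write(buf)
		}
	}()
	select {} // block; `kill -USR1 <pid>` now dumps goroutine stacks
}

The channel buffer of 1 matters: the signal package delivers with a non-blocking send, so an unbuffered channel could drop a signal that arrives while the goroutine is busy dumping.
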

daemon/debugtrap_unsupported.go

@@ -2,6 +2,6 @@
 
 package daemon // import "github.com/docker/docker/daemon"
 
-func (d *Daemon) setupDumpStackTrap(_ string) {
+func (daemon *Daemon) setupDumpStackTrap(_ string) {
 	return
 }

daemon/debugtrap_windows.go

@@ -10,7 +10,7 @@ import (
 	"golang.org/x/sys/windows"
 )
 
-func (d *Daemon) setupDumpStackTrap(root string) {
+func (daemon *Daemon) setupDumpStackTrap(root string) {
 	// Windows does not support signals like *nix systems. So instead of
 	// trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be
 	// signaled. ACL'd to builtin administrators and local system
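
A rough sketch of the Win32-event equivalent using golang.org/x/sys/windows. The event name below is made up for illustration — the daemon builds its real name at runtime and, per the comment above, attaches an ACL for administrators and local system, both of which this sketch omits:

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Hypothetical event name; not the daemon's.
	name, err := windows.UTF16PtrFromString(`Global\stackdump-demo`)
	if err != nil {
		panic(err)
	}
	// Auto-reset event, initially unsignaled, nil security attributes (no ACL).
	h, err := windows.CreateEvent(nil, 0, 0, name)
	if err != nil {
		panic(err)
	}
	defer windows.CloseHandle(h)
	for {
		if _, err := windows.WaitForSingleObject(h, windows.INFINITE); err != nil {
			panic(err)
		}
		fmt.Println("event signaled: dump goroutine stacks here")
	}
}
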

daemon/exec.go

@@ -25,17 +25,17 @@ import (
 // Seconds to wait after sending TERM before trying KILL
 const termProcessTimeout = 10 * time.Second
 
-func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) {
+func (daemon *Daemon) registerExecCommand(container *container.Container, config *exec.Config) {
 	// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
 	container.ExecCommands.Add(config.ID, config)
 	// Storing execs in daemon for easy access via Engine API.
-	d.execCommands.Add(config.ID, config)
+	daemon.execCommands.Add(config.ID, config)
 }
 
 // ExecExists looks up the exec instance and returns a bool if it exists or not.
 // It will also return the error produced by `getConfig`
-func (d *Daemon) ExecExists(name string) (bool, error) {
-	if _, err := d.getExecConfig(name); err != nil {
+func (daemon *Daemon) ExecExists(name string) (bool, error) {
+	if _, err := daemon.getExecConfig(name); err != nil {
 		return false, err
 	}
 	return true, nil
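
registerExecCommand indexes one exec config in two places: on the container, so it can be killed when the container stops, and on the daemon, so the Engine API can look it up by ID. A toy illustration of that dual-index shape — types and names are mine, not moby's exec package:

package main

import "sync"

// Config is a stand-in for moby's exec.Config.
type Config struct{ ID string }

// Store is a stand-in for the exec stores held by container and daemon.
type Store struct {
	mu sync.Mutex
	m  map[string]*Config
}

func NewStore() *Store { return &Store{m: map[string]*Config{}} }

func (s *Store) Add(id string, c *Config) { s.mu.Lock(); s.m[id] = c; s.mu.Unlock() }
func (s *Store) Delete(id string)         { s.mu.Lock(); delete(s.m, id); s.mu.Unlock() }

// register and unregister must stay symmetric: a config added to both stores
// but deleted from only one leaks, which is what execCommandGC (later in this
// file) exists to catch.
func register(containerExecs, daemonExecs *Store, c *Config) {
	containerExecs.Add(c.ID, c) // killed with the container
	daemonExecs.Add(c.ID, c)    // reachable via the Engine API
}

func unregister(containerExecs, daemonExecs *Store, c *Config) {
	containerExecs.Delete(c.ID)
	daemonExecs.Delete(c.ID)
}

func main() {
	c, d := NewStore(), NewStore()
	cfg := &Config{ID: "abc123"}
	register(c, d, cfg)
	unregister(c, d, cfg)
}
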
@@ -43,8 +43,8 @@ func (d *Daemon) ExecExists(name string) (bool, error) {
 
 // getExecConfig looks up the exec instance by name. If the container associated
 // with the exec instance is stopped or paused, it will return an error.
-func (d *Daemon) getExecConfig(name string) (*exec.Config, error) {
-	ec := d.execCommands.Get(name)
+func (daemon *Daemon) getExecConfig(name string) (*exec.Config, error) {
+	ec := daemon.execCommands.Get(name)
 	if ec == nil {
 		return nil, errExecNotFound(name)
 	}
@@ -54,7 +54,7 @@ func (d *Daemon) getExecConfig(name string) (*exec.Config, error) {
 	// saying the container isn't running, we should return a 404 so that
 	// the user sees the same error now that they will after the
 	// 5 minute clean-up loop is run which erases old/dead execs.
-	container := d.containers.Get(ec.ContainerID)
+	container := daemon.containers.Get(ec.ContainerID)
 	if container == nil {
 		return nil, containerNotFound(name)
 	}
@@ -70,13 +70,13 @@ func (d *Daemon) getExecConfig(name string) (*exec.Config, error) {
 	return ec, nil
 }
 
-func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) {
+func (daemon *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) {
 	container.ExecCommands.Delete(execConfig.ID, execConfig.Pid)
-	d.execCommands.Delete(execConfig.ID, execConfig.Pid)
+	daemon.execCommands.Delete(execConfig.ID, execConfig.Pid)
 }
 
-func (d *Daemon) getActiveContainer(name string) (*container.Container, error) {
-	container, err := d.GetContainer(name)
+func (daemon *Daemon) getActiveContainer(name string) (*container.Container, error) {
+	container, err := daemon.GetContainer(name)
 	if err != nil {
 		return nil, err
 	}
@@ -94,14 +94,14 @@ func (d *Daemon) getActiveContainer(name string) (*container.Container, error) {
 }
 
 // ContainerExecCreate sets up an exec in a running container.
-func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) {
-	cntr, err := d.getActiveContainer(name)
+func (daemon *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) {
+	cntr, err := daemon.getActiveContainer(name)
 	if err != nil {
 		return "", err
 	}
 
 	cmd := strslice.StrSlice(config.Cmd)
-	entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd)
+	entrypoint, args := daemon.getEntrypointAndArgs(strslice.StrSlice{}, cmd)
 
 	keys := []byte{}
 	if config.DetachKeys != "" {
@@ -125,7 +125,7 @@ func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (str
 	execConfig.User = config.User
 	execConfig.WorkingDir = config.WorkingDir
 
-	linkedEnv, err := d.setupLinkedContainers(cntr)
+	linkedEnv, err := daemon.setupLinkedContainers(cntr)
 	if err != nil {
 		return "", err
 	}
@@ -137,12 +137,12 @@ func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (str
 		execConfig.WorkingDir = cntr.Config.WorkingDir
 	}
 
-	d.registerExecCommand(cntr, execConfig)
+	daemon.registerExecCommand(cntr, execConfig)
 
 	attributes := map[string]string{
 		"execID": execConfig.ID,
 	}
-	d.LogContainerEventWithAttributes(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " "), attributes)
+	daemon.LogContainerEventWithAttributes(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " "), attributes)
 
 	return execConfig.ID, nil
 }
@@ -150,13 +150,13 @@ func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (str
 // ContainerExecStart starts a previously set up exec instance. The
 // std streams are set up.
 // If ctx is cancelled, the process is terminated.
-func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.Reader, stdout io.Writer, stderr io.Writer) (err error) {
+func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.Reader, stdout io.Writer, stderr io.Writer) (err error) {
 	var (
 		cStdin           io.ReadCloser
 		cStdout, cStderr io.Writer
 	)
 
-	ec, err := d.getExecConfig(name)
+	ec, err := daemon.getExecConfig(name)
 	if err != nil {
 		return errExecNotFound(name)
 	}
@@ -175,12 +175,12 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
 	ec.Running = true
 	ec.Unlock()
 
-	c := d.containers.Get(ec.ContainerID)
+	c := daemon.containers.Get(ec.ContainerID)
 	logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID)
 	attributes := map[string]string{
 		"execID": ec.ID,
 	}
-	d.LogContainerEventWithAttributes(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " "), attributes)
+	daemon.LogContainerEventWithAttributes(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " "), attributes)
 
 	defer func() {
 		if err != nil {
@@ -220,7 +220,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
 
 	p := &specs.Process{}
 	if runtime.GOOS != "windows" {
-		container, err := d.containerdCli.LoadContainer(ctx, ec.ContainerID)
+		container, err := daemon.containerdCli.LoadContainer(ctx, ec.ContainerID)
 		if err != nil {
 			return err
 		}
@@ -239,7 +239,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
 		p.Cwd = "/"
 	}
 
-	if err := d.execSetPlatformOpt(c, ec, p); err != nil {
+	if err := daemon.execSetPlatformOpt(c, ec, p); err != nil {
 		return err
 	}
 
@@ -260,7 +260,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
 	// Synchronize with libcontainerd event loop
 	ec.Lock()
 	c.ExecCommands.Lock()
-	systemPid, err := d.containerd.Exec(ctx, c.ID, ec.ID, p, cStdin != nil, ec.InitializeStdio)
+	systemPid, err := daemon.containerd.Exec(ctx, c.ID, ec.ID, p, cStdin != nil, ec.InitializeStdio)
 	// the exec context should be ready, or error happened.
 	// close the chan to notify readiness
 	close(ec.Started)
@@ -276,7 +276,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
 	select {
 	case <-ctx.Done():
 		logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID)
-		d.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["TERM"]))
+		daemon.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["TERM"]))
 
 		timeout := time.NewTimer(termProcessTimeout)
 		defer timeout.Stop()
@@ -284,7 +284,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
 		select {
 		case <-timeout.C:
 			logrus.Infof("Container %v, process %v failed to exit within %v of signal TERM - using the force", c.ID, name, termProcessTimeout)
-			d.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["KILL"]))
+			daemon.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["KILL"]))
 		case <-attachErr:
 			// TERM signal worked
 		}
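
The two selects above implement a common escalation pattern: on context cancellation send TERM, give the process termProcessTimeout to exit, then fall back to KILL. Reduced to a reusable skeleton — sendSignal and done are stand-ins for daemon.containerd.SignalProcess and the attachErr channel, not the daemon's actual signatures:

package main

import (
	"context"
	"syscall"
	"time"
)

// awaitExit waits for the process (represented by done) or for cancellation,
// escalating TERM -> KILL with a grace period, as ContainerExecStart does.
func awaitExit(ctx context.Context, done <-chan error, grace time.Duration, sendSignal func(syscall.Signal)) error {
	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		sendSignal(syscall.SIGTERM)
		timeout := time.NewTimer(grace)
		defer timeout.Stop()
		select {
		case <-timeout.C:
			sendSignal(syscall.SIGKILL) // TERM was ignored: use the force
		case <-done:
			// TERM signal worked
		}
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error)
	go func() { time.Sleep(50 * time.Millisecond); cancel() }()
	_ = awaitExit(ctx, done, time.Second, func(s syscall.Signal) {
		if s == syscall.SIGTERM {
			close(done) // simulate the process exiting on TERM
		}
	})
}

Stopping the timer in a defer, as the original also does, avoids holding timer resources when TERM works quickly.
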
@@ -297,7 +297,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
 			attributes := map[string]string{
 				"execID": ec.ID,
 			}
-			d.LogContainerEventWithAttributes(c, "exec_detach", attributes)
+			daemon.LogContainerEventWithAttributes(c, "exec_detach", attributes)
 		}
 	}
 	return nil
@@ -305,16 +305,16 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
 
 // execCommandGC runs a ticker to clean up the daemon references
 // of exec configs that are no longer part of the container.
-func (d *Daemon) execCommandGC() {
+func (daemon *Daemon) execCommandGC() {
 	for range time.Tick(5 * time.Minute) {
 		var (
 			cleaned          int
-			liveExecCommands = d.containerExecIds()
+			liveExecCommands = daemon.containerExecIds()
 		)
-		for id, config := range d.execCommands.Commands() {
+		for id, config := range daemon.execCommands.Commands() {
 			if config.CanRemove {
 				cleaned++
-				d.execCommands.Delete(id, config.Pid)
+				daemon.execCommands.Delete(id, config.Pid)
 			} else {
 				if _, exists := liveExecCommands[id]; !exists {
 					config.CanRemove = true
@@ -329,9 +329,9 @@ func (d *Daemon) execCommandGC() {
 
 // containerExecIds returns a list of all the current exec ids that are in use
 // and running inside a container.
-func (d *Daemon) containerExecIds() map[string]struct{} {
+func (daemon *Daemon) containerExecIds() map[string]struct{} {
 	ids := map[string]struct{}{}
-	for _, c := range d.containers.List() {
+	for _, c := range daemon.containers.List() {
 		for _, id := range c.ExecCommands.List() {
 			ids[id] = struct{}{}
 		}
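
Taken together, execCommandGC and containerExecIds form a small two-pass mark-and-sweep: every five minutes, an entry in the daemon store that no container still references is flagged CanRemove, and an already-flagged entry is deleted. The rule, condensed into a sketch with illustrative types (the real stores are the concurrency-safe ones above):

package main

import "fmt"

type gcConfig struct {
	CanRemove bool
}

// sweep applies one GC tick: delete what was flagged last tick, and flag
// whatever the live set (exec IDs still attached to containers) no longer covers.
func sweep(daemonExecs map[string]*gcConfig, live map[string]struct{}) (cleaned int) {
	for id, cfg := range daemonExecs {
		switch {
		case cfg.CanRemove:
			cleaned++
			delete(daemonExecs, id)
		default:
			if _, ok := live[id]; !ok {
				cfg.CanRemove = true // removed next tick if still unreferenced
			}
		}
	}
	return cleaned
}

func main() {
	execs := map[string]*gcConfig{"a": {}, "b": {}}
	live := map[string]struct{}{"a": {}}
	fmt.Println(sweep(execs, live)) // 0: "b" is only flagged
	fmt.Println(sweep(execs, live)) // 1: "b" is deleted
}

Flagging first rather than deleting immediately presumably gives a racing exec a full interval to show up as live before its daemon-side entry is reaped.
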

daemon/health.go

@@ -273,7 +273,7 @@ func getProbe(c *container.Container) probe {
 // Ensure the health-check monitor is running or not, depending on the current
 // state of the container.
 // Called from monitor.go, with c locked.
-func (d *Daemon) updateHealthMonitor(c *container.Container) {
+func (daemon *Daemon) updateHealthMonitor(c *container.Container) {
 	h := c.State.Health
 	if h == nil {
 		return // No healthcheck configured
@@ -283,7 +283,7 @@ func (d *Daemon) updateHealthMonitor(c *container.Container) {
 	wantRunning := c.Running && !c.Paused && probe != nil
 	if wantRunning {
 		if stop := h.OpenMonitorChannel(); stop != nil {
-			go monitor(d, c, stop, probe)
+			go monitor(daemon, c, stop, probe)
 		}
 	} else {
 		h.CloseMonitorChannel()
@@ -294,14 +294,14 @@ func (d *Daemon) updateHealthMonitor(c *container.Container) {
 // initHealthMonitor is called from monitor.go and we should never be running
 // two instances at once.
 // Called with c locked.
-func (d *Daemon) initHealthMonitor(c *container.Container) {
+func (daemon *Daemon) initHealthMonitor(c *container.Container) {
 	// If no healthcheck is setup then don't init the monitor
 	if getProbe(c) == nil {
 		return
 	}
 
 	// This is needed in case we're auto-restarting
-	d.stopHealthchecks(c)
+	daemon.stopHealthchecks(c)
 
 	if h := c.State.Health; h != nil {
 		h.SetStatus(types.Starting)
@@ -312,12 +312,12 @@ func (d *Daemon) initHealthMonitor(c *container.Container) {
 		c.State.Health = h
 	}
 
-	d.updateHealthMonitor(c)
+	daemon.updateHealthMonitor(c)
 }
 
 // Called when the container is being stopped (whether because the health check is
 // failing or for any other reason).
-func (d *Daemon) stopHealthchecks(c *container.Container) {
+func (daemon *Daemon) stopHealthchecks(c *container.Container) {
 	h := c.State.Health
 	if h != nil {
 		h.CloseMonitorChannel()
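
updateHealthMonitor leans on OpenMonitorChannel returning nil when a monitor is already running — the classic stop-channel guard for "at most one background goroutine per container". A stripped-down version of that guard (a hypothetical helper sketching the idiom, not the health package's actual API):

package main

import (
	"fmt"
	"sync"
)

// monitorHandle allows at most one running monitor, stoppable and restartable.
type monitorHandle struct {
	mu   sync.Mutex
	stop chan struct{}
}

// open returns a fresh stop channel, or nil if a monitor is already running,
// mirroring how OpenMonitorChannel is used in the hunk above.
func (m *monitorHandle) open() chan struct{} {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.stop != nil {
		return nil
	}
	m.stop = make(chan struct{})
	return m.stop
}

func (m *monitorHandle) close() {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.stop != nil {
		close(m.stop) // the monitor goroutine selects on this and exits
		m.stop = nil
	}
}

func main() {
	var h monitorHandle
	stop := h.open()
	done := make(chan struct{})
	go func() { <-stop; close(done) }() // the "monitor"
	fmt.Println(h.open() == nil)        // true: already running
	h.close()
	<-done // monitor observed the stop
}
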

daemon/metrics.go

@@ -115,8 +115,8 @@ func (ctr *stateCounter) Collect(ch chan<- prometheus.Metric) {
 	ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(stopped), "stopped")
 }
 
-func (d *Daemon) cleanupMetricsPlugins() {
-	ls := d.PluginStore.GetAllManagedPluginsByCap(metricsPluginType)
+func (daemon *Daemon) cleanupMetricsPlugins() {
+	ls := daemon.PluginStore.GetAllManagedPluginsByCap(metricsPluginType)
 	var wg sync.WaitGroup
 	wg.Add(len(ls))
 
@@ -137,8 +137,8 @@ func (d *Daemon) cleanupMetricsPlugins() {
 	}
 	wg.Wait()
 
-	if d.metricsPluginListener != nil {
-		d.metricsPluginListener.Close()
+	if daemon.metricsPluginListener != nil {
+		daemon.metricsPluginListener.Close()
 	}
 }
 
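
cleanupMetricsPlugins is a standard WaitGroup fan-out: one goroutine per plugin, wait for all of them, and only then close the shared listener. The shape, minus the plugin specifics:

package main

import (
	"fmt"
	"sync"
)

func main() {
	plugins := []string{"p1", "p2", "p3"} // stand-ins for the managed metrics plugins
	var wg sync.WaitGroup
	wg.Add(len(plugins))
	for _, p := range plugins {
		go func(name string) {
			defer wg.Done()
			fmt.Println("stopping", name) // the real code disconnects each plugin here
		}(p)
	}
	wg.Wait()
	fmt.Println("all plugins stopped; safe to close the listener")
}

Calling wg.Add once with the full count before launching any goroutine, as the original does, avoids the race of Wait running before the counter is set.
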