
daemon: rename all receivers to "daemon"

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Author: Sebastiaan van Stijn
Date:   2019-08-09 13:19:49 +02:00
parent 5d040cbd16
commit 797ec8e913
GPG key ID: 76698F39D527CE8C (no known key found for this signature in database)

6 changed files with 46 additions and 46 deletions
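
The change is mechanical: every method on *Daemon that used the one-letter receiver d now uses daemon, and every reference to the receiver inside those methods follows. A minimal before/after sketch of the pattern (the Daemon field and describe method here are made up for illustration, not taken from the commit):

package main

import "fmt"

type Daemon struct{ id string }

// Before the rename the receiver was a single letter:
//
//	func (d *Daemon) describe() string { return d.id }
//
// After, the receiver and every use of it inside the body are spelled out:
func (daemon *Daemon) describe() string { return daemon.id }

func main() {
	fmt.Println((&Daemon{id: "example"}).describe())
}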

daemon/debugtrap_unix.go

@@ -11,7 +11,7 @@ import (
"golang.org/x/sys/unix"
)

-func (d *Daemon) setupDumpStackTrap(root string) {
+func (daemon *Daemon) setupDumpStackTrap(root string) {
c := make(chan os.Signal, 1)
signal.Notify(c, unix.SIGUSR1)
go func() {
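
The hunk above is from the function that installs the daemon's SIGUSR1 stack-dump trap. A self-contained sketch of that pattern (simplified and unix-only; the real implementation writes the dump to a file under the daemon's root directory rather than to stderr):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"runtime"
	"syscall"
)

func setupDumpStackTrap() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGUSR1)
	go func() {
		for range c {
			// Collect stacks of all goroutines, growing the buffer until they fit.
			buf := make([]byte, 32*1024)
			for {
				n := runtime.Stack(buf, true)
				if n < len(buf) {
					buf = buf[:n]
					break
				}
				buf = make([]byte, 2*len(buf))
			}
			fmt.Fprintf(os.Stderr, "=== goroutine stacks ===\n%s\n", buf)
		}
	}()
}

func main() {
	setupDumpStackTrap()
	fmt.Println("send SIGUSR1 to PID", os.Getpid(), "to dump stacks")
	select {} // block so the process stays alive
}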

daemon/debugtrap_unsupported.go

@@ -2,6 +2,6 @@
package daemon // import "github.com/docker/docker/daemon"

-func (d *Daemon) setupDumpStackTrap(_ string) {
+func (daemon *Daemon) setupDumpStackTrap(_ string) {
return
}

daemon/debugtrap_windows.go

@@ -10,7 +10,7 @@ import (
"golang.org/x/sys/windows"
)

-func (d *Daemon) setupDumpStackTrap(root string) {
+func (daemon *Daemon) setupDumpStackTrap(root string) {
// Windows does not support signals like *nix systems. So instead of
// trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be
// signaled. ACL'd to builtin administrators and local system
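
As the comment above explains, the Windows path waits on a named Win32 event rather than a signal. A rough sketch of that wait loop using golang.org/x/sys/windows (the event name and the dump-to-stderr step are illustrative; the daemon's real code also ACLs the event, as the comment notes):

//go:build windows

package main

import (
	"fmt"
	"os"
	"runtime"

	"golang.org/x/sys/windows"
)

func main() {
	// Create a named auto-reset event; another process signals it via SetEvent.
	name, _ := windows.UTF16PtrFromString(`Global\example-dump-stacks`)
	ev, err := windows.CreateEvent(nil, 0, 0, name)
	if err != nil {
		fmt.Fprintln(os.Stderr, "CreateEvent:", err)
		return
	}
	defer windows.CloseHandle(ev)

	for {
		// Block until the event is signaled, then dump all goroutine stacks.
		windows.WaitForSingleObject(ev, windows.INFINITE)
		buf := make([]byte, 64*1024)
		n := runtime.Stack(buf, true)
		os.Stderr.Write(buf[:n])
	}
}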

daemon/exec.go

@@ -25,17 +25,17 @@ import (
// Seconds to wait after sending TERM before trying KILL
const termProcessTimeout = 10 * time.Second

-func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) {
+func (daemon *Daemon) registerExecCommand(container *container.Container, config *exec.Config) {
// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
container.ExecCommands.Add(config.ID, config)
// Storing execs in daemon for easy access via Engine API.
-d.execCommands.Add(config.ID, config)
+daemon.execCommands.Add(config.ID, config)
}
// ExecExists looks up the exec instance and returns a bool if it exists or not.
// It will also return the error produced by `getConfig`
-func (d *Daemon) ExecExists(name string) (bool, error) {
-if _, err := d.getExecConfig(name); err != nil {
+func (daemon *Daemon) ExecExists(name string) (bool, error) {
+if _, err := daemon.getExecConfig(name); err != nil {
return false, err
}
return true, nil
@@ -43,8 +43,8 @@ func (d *Daemon) ExecExists(name string) (bool, error) {
// getExecConfig looks up the exec instance by name. If the container associated
// with the exec instance is stopped or paused, it will return an error.
-func (d *Daemon) getExecConfig(name string) (*exec.Config, error) {
-ec := d.execCommands.Get(name)
+func (daemon *Daemon) getExecConfig(name string) (*exec.Config, error) {
+ec := daemon.execCommands.Get(name)
if ec == nil {
return nil, errExecNotFound(name)
}
@@ -54,7 +54,7 @@ func (d *Daemon) getExecConfig(name string) (*exec.Config, error) {
// saying the container isn't running, we should return a 404 so that
// the user sees the same error now that they will after the
// 5 minute clean-up loop is run which erases old/dead execs.
-container := d.containers.Get(ec.ContainerID)
+container := daemon.containers.Get(ec.ContainerID)
if container == nil {
return nil, containerNotFound(name)
}
@@ -70,13 +70,13 @@ func (d *Daemon) getExecConfig(name string) (*exec.Config, error) {
return ec, nil
}

-func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) {
+func (daemon *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) {
container.ExecCommands.Delete(execConfig.ID, execConfig.Pid)
-d.execCommands.Delete(execConfig.ID, execConfig.Pid)
+daemon.execCommands.Delete(execConfig.ID, execConfig.Pid)
}

-func (d *Daemon) getActiveContainer(name string) (*container.Container, error) {
-container, err := d.GetContainer(name)
+func (daemon *Daemon) getActiveContainer(name string) (*container.Container, error) {
+container, err := daemon.GetContainer(name)
if err != nil {
return nil, err
}
@@ -94,14 +94,14 @@ func (d *Daemon) getActiveContainer(name string) (*container.Container, error) {
}

// ContainerExecCreate sets up an exec in a running container.
-func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) {
-cntr, err := d.getActiveContainer(name)
+func (daemon *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) {
+cntr, err := daemon.getActiveContainer(name)
if err != nil {
return "", err
}
cmd := strslice.StrSlice(config.Cmd)
-entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd)
+entrypoint, args := daemon.getEntrypointAndArgs(strslice.StrSlice{}, cmd)
keys := []byte{}
if config.DetachKeys != "" {
@@ -125,7 +125,7 @@ func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (str
execConfig.User = config.User
execConfig.WorkingDir = config.WorkingDir

-linkedEnv, err := d.setupLinkedContainers(cntr)
+linkedEnv, err := daemon.setupLinkedContainers(cntr)
if err != nil {
return "", err
}
@@ -137,12 +137,12 @@ func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (str
execConfig.WorkingDir = cntr.Config.WorkingDir
}

-d.registerExecCommand(cntr, execConfig)
+daemon.registerExecCommand(cntr, execConfig)
attributes := map[string]string{
"execID": execConfig.ID,
}
-d.LogContainerEventWithAttributes(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " "), attributes)
+daemon.LogContainerEventWithAttributes(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " "), attributes)
return execConfig.ID, nil
}
@@ -150,13 +150,13 @@ func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (str
// ContainerExecStart starts a previously set up exec instance. The
// std streams are set up.
// If ctx is cancelled, the process is terminated.
-func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.Reader, stdout io.Writer, stderr io.Writer) (err error) {
+func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.Reader, stdout io.Writer, stderr io.Writer) (err error) {
var (
cStdin io.ReadCloser
cStdout, cStderr io.Writer
)
-ec, err := d.getExecConfig(name)
+ec, err := daemon.getExecConfig(name)
if err != nil {
return errExecNotFound(name)
}
@@ -175,12 +175,12 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
ec.Running = true
ec.Unlock()

-c := d.containers.Get(ec.ContainerID)
+c := daemon.containers.Get(ec.ContainerID)
logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID)
attributes := map[string]string{
"execID": ec.ID,
}
-d.LogContainerEventWithAttributes(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " "), attributes)
+daemon.LogContainerEventWithAttributes(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " "), attributes)
defer func() {
if err != nil {
@@ -220,7 +220,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
p := &specs.Process{}
if runtime.GOOS != "windows" {
-container, err := d.containerdCli.LoadContainer(ctx, ec.ContainerID)
+container, err := daemon.containerdCli.LoadContainer(ctx, ec.ContainerID)
if err != nil {
return err
}
@@ -239,7 +239,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
p.Cwd = "/"
}

-if err := d.execSetPlatformOpt(c, ec, p); err != nil {
+if err := daemon.execSetPlatformOpt(c, ec, p); err != nil {
return err
}
@@ -260,7 +260,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
// Synchronize with libcontainerd event loop
ec.Lock()
c.ExecCommands.Lock()
-systemPid, err := d.containerd.Exec(ctx, c.ID, ec.ID, p, cStdin != nil, ec.InitializeStdio)
+systemPid, err := daemon.containerd.Exec(ctx, c.ID, ec.ID, p, cStdin != nil, ec.InitializeStdio)
// the exec context should be ready, or error happened.
// close the chan to notify readiness
close(ec.Started)
@@ -276,7 +276,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
select {
case <-ctx.Done():
logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID)
-d.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["TERM"]))
+daemon.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["TERM"]))
timeout := time.NewTimer(termProcessTimeout)
defer timeout.Stop()
@@ -284,7 +284,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
select {
case <-timeout.C:
logrus.Infof("Container %v, process %v failed to exit within %v of signal TERM - using the force", c.ID, name, termProcessTimeout)
-d.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["KILL"]))
+daemon.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["KILL"]))
case <-attachErr:
// TERM signal worked
}
@@ -297,7 +297,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
attributes := map[string]string{
"execID": ec.ID,
}
-d.LogContainerEventWithAttributes(c, "exec_detach", attributes)
+daemon.LogContainerEventWithAttributes(c, "exec_detach", attributes)
}
}
return nil
@@ -305,16 +305,16 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
// execCommandGC runs a ticker to clean up the daemon references
// of exec configs that are no longer part of the container.
-func (d *Daemon) execCommandGC() {
+func (daemon *Daemon) execCommandGC() {
for range time.Tick(5 * time.Minute) {
var (
cleaned int
-liveExecCommands = d.containerExecIds()
+liveExecCommands = daemon.containerExecIds()
)
-for id, config := range d.execCommands.Commands() {
+for id, config := range daemon.execCommands.Commands() {
if config.CanRemove {
cleaned++
-d.execCommands.Delete(id, config.Pid)
+daemon.execCommands.Delete(id, config.Pid)
} else {
if _, exists := liveExecCommands[id]; !exists {
config.CanRemove = true
@@ -329,9 +329,9 @@ func (d *Daemon) execCommandGC() {
// containerExecIds returns a list of all the current exec ids that are in use
// and running inside a container.
-func (d *Daemon) containerExecIds() map[string]struct{} {
+func (daemon *Daemon) containerExecIds() map[string]struct{} {
ids := map[string]struct{}{}
-for _, c := range d.containers.List() {
+for _, c := range daemon.containers.List() {
for _, id := range c.ExecCommands.List() {
ids[id] = struct{}{}
}
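
One pattern in this file is worth pulling out: the ctx.Done branch of ContainerExecStart escalates from TERM to KILL after termProcessTimeout. A self-contained sketch of that select/timer shape, using a plain os/exec process instead of containerd (so the helper name and details here are illustrative, and it is unix-only because of SIGTERM):

package main

import (
	"context"
	"log"
	"os/exec"
	"syscall"
	"time"
)

// Seconds to wait after sending TERM before trying KILL
const termProcessTimeout = 10 * time.Second

func runWithGracefulStop(ctx context.Context, cmd *exec.Cmd) error {
	if err := cmd.Start(); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	select {
	case err := <-done:
		return err // process exited on its own
	case <-ctx.Done():
		cmd.Process.Signal(syscall.SIGTERM) // ask nicely first
		timeout := time.NewTimer(termProcessTimeout)
		defer timeout.Stop()
		select {
		case <-done:
			// TERM signal worked
		case <-timeout.C:
			log.Printf("process %d did not exit within %v of TERM - using the force", cmd.Process.Pid, termProcessTimeout)
			cmd.Process.Kill()
			<-done
		}
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	log.Println(runWithGracefulStop(ctx, exec.Command("sleep", "60")))
}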

daemon/health.go

@@ -273,7 +273,7 @@ func getProbe(c *container.Container) probe {
// Ensure the health-check monitor is running or not, depending on the current
// state of the container.
// Called from monitor.go, with c locked.
-func (d *Daemon) updateHealthMonitor(c *container.Container) {
+func (daemon *Daemon) updateHealthMonitor(c *container.Container) {
h := c.State.Health
if h == nil {
return // No healthcheck configured
@@ -283,7 +283,7 @@ func (d *Daemon) updateHealthMonitor(c *container.Container) {
wantRunning := c.Running && !c.Paused && probe != nil
if wantRunning {
if stop := h.OpenMonitorChannel(); stop != nil {
-go monitor(d, c, stop, probe)
+go monitor(daemon, c, stop, probe)
}
} else {
h.CloseMonitorChannel()
@@ -294,14 +294,14 @@ func (d *Daemon) updateHealthMonitor(c *container.Container) {
// initHealthMonitor is called from monitor.go and we should never be running
// two instances at once.
// Called with c locked.
-func (d *Daemon) initHealthMonitor(c *container.Container) {
+func (daemon *Daemon) initHealthMonitor(c *container.Container) {
// If no healthcheck is setup then don't init the monitor
if getProbe(c) == nil {
return
}
// This is needed in case we're auto-restarting
-d.stopHealthchecks(c)
+daemon.stopHealthchecks(c)
if h := c.State.Health; h != nil {
h.SetStatus(types.Starting)
@@ -312,12 +312,12 @@ func (d *Daemon) initHealthMonitor(c *container.Container) {
c.State.Health = h
}

-d.updateHealthMonitor(c)
+daemon.updateHealthMonitor(c)
}
// Called when the container is being stopped (whether because the health check is
// failing or for any other reason).
-func (d *Daemon) stopHealthchecks(c *container.Container) {
+func (daemon *Daemon) stopHealthchecks(c *container.Container) {
h := c.State.Health
if h != nil {
h.CloseMonitorChannel()
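
OpenMonitorChannel and CloseMonitorChannel hand a stop channel to the monitor goroutine so that at most one monitor runs per container. A minimal sketch of that start/stop idiom (the health type and probe below are stand-ins, not the daemon's types):

package main

import (
	"fmt"
	"sync"
	"time"
)

type health struct {
	mu   sync.Mutex
	stop chan struct{}
}

// openMonitorChannel returns a fresh stop channel, or nil if a monitor
// is already running, so callers start at most one monitor goroutine.
func (h *health) openMonitorChannel() chan struct{} {
	h.mu.Lock()
	defer h.mu.Unlock()
	if h.stop != nil {
		return nil
	}
	h.stop = make(chan struct{})
	return h.stop
}

// closeMonitorChannel signals the monitor goroutine to exit.
func (h *health) closeMonitorChannel() {
	h.mu.Lock()
	defer h.mu.Unlock()
	if h.stop != nil {
		close(h.stop)
		h.stop = nil
	}
}

func monitor(stop chan struct{}, probe func()) {
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			probe()
		}
	}
}

func main() {
	h := &health{}
	if stop := h.openMonitorChannel(); stop != nil {
		go monitor(stop, func() { fmt.Println("probe") })
	}
	time.Sleep(1200 * time.Millisecond)
	h.closeMonitorChannel()
	time.Sleep(100 * time.Millisecond) // give the monitor time to exit
}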

daemon/metrics.go

@@ -115,8 +115,8 @@ func (ctr *stateCounter) Collect(ch chan<- prometheus.Metric) {
ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(stopped), "stopped")
}

-func (d *Daemon) cleanupMetricsPlugins() {
-ls := d.PluginStore.GetAllManagedPluginsByCap(metricsPluginType)
+func (daemon *Daemon) cleanupMetricsPlugins() {
+ls := daemon.PluginStore.GetAllManagedPluginsByCap(metricsPluginType)
var wg sync.WaitGroup
wg.Add(len(ls))
@@ -137,8 +137,8 @@ func (d *Daemon) cleanupMetricsPlugins() {
}
wg.Wait()

-if d.metricsPluginListener != nil {
-d.metricsPluginListener.Close()
+if daemon.metricsPluginListener != nil {
+daemon.metricsPluginListener.Close()
}
}
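
cleanupMetricsPlugins fans out one goroutine per managed plugin and blocks on a sync.WaitGroup before closing the shared listener. A compact sketch of that fan-out/wait shape (the plugin type and its disconnect step are illustrative, not the daemon's plugin API):

package main

import (
	"fmt"
	"sync"
	"time"
)

type plugin struct{ name string }

func (p *plugin) disconnect() {
	time.Sleep(50 * time.Millisecond) // stand-in for real teardown work
	fmt.Println("disconnected", p.name)
}

func cleanupPlugins(ls []*plugin) {
	var wg sync.WaitGroup
	wg.Add(len(ls))
	for _, p := range ls {
		// Each plugin is torn down concurrently; pass p explicitly so every
		// goroutine gets its own copy (required before Go 1.22).
		go func(p *plugin) {
			defer wg.Done()
			p.disconnect()
		}(p)
	}
	// Only close shared resources (the metrics listener, in the daemon's
	// case) once every plugin has finished.
	wg.Wait()
	fmt.Println("listener closed")
}

func main() {
	cleanupPlugins([]*plugin{{"a"}, {"b"}, {"c"}})
}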