package daemon // import "github.com/docker/docker/daemon"

import (
	"bytes"
	"context"
	"fmt"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/strslice"
	"github.com/docker/docker/container"
	"github.com/sirupsen/logrus"
)

const (
	// Longest healthcheck probe output message to store. Longer messages will be truncated.
	maxOutputLen = 4096

	// Default interval between probe runs (from the end of the first to the start of the second).
	// Also the time before the first probe.
	defaultProbeInterval = 30 * time.Second

	// The maximum length of time a single probe run should take. If the probe takes longer
	// than this, the check is considered to have failed.
	defaultProbeTimeout = 30 * time.Second

	// The time given for the container to start before the health check starts considering
	// the container unstable. Defaults to none.
	defaultStartPeriod = 0 * time.Second

	// Default number of consecutive failures of the health check
	// for the container to be considered unhealthy.
	defaultProbeRetries = 3

	// Maximum number of entries to record in a container's health-check log.
	maxLogEntries = 5
)
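
// For illustration, a Dockerfile healthcheck that relies on all of the
// defaults above is equivalent to writing:
//
//	HEALTHCHECK --interval=30s --timeout=30s --start-period=0s --retries=3 \
//		CMD curl -f http://localhost/ || exit 1
//
// (curl is used here only as an example probe command.)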

const (
	// Exit status codes that can be returned by the probe command.

	exitStatusHealthy = 0 // Container is healthy
)

// probe implementations know how to run a particular type of probe.
type probe interface {
	// Perform one run of the check. Returns the exit code and an optional
	// short diagnostic string.
	run(context.Context, *Daemon, *container.Container) (*types.HealthcheckResult, error)
}

// cmdProbe implements the "CMD" probe type.
type cmdProbe struct {
	// Run the command with the system's default shell instead of execing it directly.
	shell bool
}

// exec the healthcheck command in the container.
// Returns the exit code and probe output (if any).
func (p *cmdProbe) run(ctx context.Context, d *Daemon, cntr *container.Container) (*types.HealthcheckResult, error) {
	startTime := time.Now()
	cmdSlice := strslice.StrSlice(cntr.Config.Healthcheck.Test)[1:]
	if p.shell {
		cmdSlice = append(getShell(cntr), cmdSlice...)
	}
	entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmdSlice)
	execConfig := container.NewExecConfig(cntr)
	execConfig.OpenStdin = false
	execConfig.OpenStdout = true
	execConfig.OpenStderr = true
	execConfig.DetachKeys = []byte{}
	execConfig.Entrypoint = entrypoint
	execConfig.Args = args
	execConfig.Tty = false
	execConfig.Privileged = false
	execConfig.User = cntr.Config.User
	execConfig.WorkingDir = cntr.Config.WorkingDir

	linkedEnv, err := d.setupLinkedContainers(cntr)
	if err != nil {
		return nil, err
	}
	execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(execConfig.Tty, linkedEnv), execConfig.Env)

	d.registerExecCommand(cntr, execConfig)
	attributes := map[string]string{
		"execID": execConfig.ID,
	}
	d.LogContainerEventWithAttributes(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " "), attributes)

	output := &limitedBuffer{}
	probeCtx, cancelProbe := context.WithCancel(ctx)
	defer cancelProbe()
	execErr := make(chan error, 1)

	options := containertypes.ExecStartOptions{
		Stdout: output,
		Stderr: output,
	}

	go func() { execErr <- d.ContainerExecStart(probeCtx, execConfig.ID, options) }()

	// Starting an exec can take a significant amount of time: on the order
	// of 1s in extreme cases. The time it takes dockerd and containerd to
	// start the exec is time that the probe process is not running, and so
	// should not count towards the health check's timeout. Apply a separate
	// timeout to abort if the exec request is wedged.
	tm := time.NewTimer(30 * time.Second)
	defer tm.Stop()
	select {
	case <-tm.C:
		return nil, fmt.Errorf("timed out starting health check for container %s", cntr.ID)
	case err := <-execErr:
		if err != nil {
			return nil, err
		}
	case <-execConfig.Started:
		healthCheckStartDuration.UpdateSince(startTime)
	}
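
	// Stop the start-timeout timer, draining its channel if it has already
	// fired, so the same timer can safely be reused for the probe timeout.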
	if !tm.Stop() {
		<-tm.C
	}
	probeTimeout := timeoutWithDefault(cntr.Config.Healthcheck.Timeout, defaultProbeTimeout)
	tm.Reset(probeTimeout)
	select {
	case <-tm.C:
		cancelProbe()
		logrus.WithContext(ctx).Debugf("Health check for container %s taking too long", cntr.ID)
		// Wait for probe to exit (it might take some time to call containerd to kill
		// the process and we don't want dying probes to pile up).
		<-execErr
		return &types.HealthcheckResult{
			ExitCode: -1,
			Output:   fmt.Sprintf("Health check exceeded timeout (%v)", probeTimeout),
			End:      time.Now(),
		}, nil
	case err := <-execErr:
		if err != nil {
			return nil, err
		}
	}

	info, err := d.getExecConfig(execConfig.ID)
	if err != nil {
		return nil, err
	}
	exitCode, err := func() (int, error) {
		info.Lock()
		defer info.Unlock()
		if info.ExitCode == nil {
			return 0, fmt.Errorf("healthcheck for container %s has no exit code", cntr.ID)
		}
		return *info.ExitCode, nil
	}()
	if err != nil {
		return nil, err
	}
	// Note: Go's json package will handle invalid UTF-8 for us
	out := output.String()
	return &types.HealthcheckResult{
		End:      time.Now(),
		ExitCode: exitCode,
		Output:   out,
	}, nil
}

// Update the container's State.Health struct based on the latest probe's result.
func handleProbeResult(d *Daemon, c *container.Container, result *types.HealthcheckResult, done chan struct{}) {
	c.Lock()
	defer c.Unlock()

	// The probe may have been cancelled while we waited for the lock;
	// if so, ignore the result.
	select {
	case <-done:
		return
	default:
	}

	retries := c.Config.Healthcheck.Retries
	if retries <= 0 {
		retries = defaultProbeRetries
	}

	h := c.State.Health
	oldStatus := h.Status()
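
	// Append the result to the rolling probe log, keeping at most
	// maxLogEntries of the most recent results.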
	if len(h.Log) >= maxLogEntries {
		h.Log = append(h.Log[len(h.Log)+1-maxLogEntries:], result)
	} else {
		h.Log = append(h.Log, result)
	}

	if result.ExitCode == exitStatusHealthy {
		h.FailingStreak = 0
		h.SetStatus(types.Healthy)
	} else { // Failure (including invalid exit code)
		shouldIncrementStreak := true

		// If the container is starting (i.e. we never had a successful health check)
		// then we check if we are within the start period of the container, in which
		// case we do not increment the failure streak.
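		// For example, with a 60s start period, probe failures during the
		// first 60s do not increase FailingStreak as long as no probe has
		// yet succeeded; from the 60s mark onwards, every failure counts
		// towards 'retries'.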
		if h.Status() == types.Starting {
			startPeriod := timeoutWithDefault(c.Config.Healthcheck.StartPeriod, defaultStartPeriod)
			timeSinceStart := result.Start.Sub(c.State.StartedAt)

			// If still within the start period, then don't increment failing streak.
			if timeSinceStart < startPeriod {
				shouldIncrementStreak = false
			}
		}

		if shouldIncrementStreak {
			h.FailingStreak++

			if h.FailingStreak >= retries {
				h.SetStatus(types.Unhealthy)
			}
		}
		// Else we're starting or healthy. Stay in that state.
	}

	// replicate Health status changes
	if err := c.CheckpointTo(d.containersReplica); err != nil {
		// queries will be inconsistent until the next probe runs or other state mutations
		// checkpoint the container
		logrus.Errorf("Error replicating health state for container %s: %v", c.ID, err)
	}

	current := h.Status()
	if oldStatus != current {
		d.LogContainerEvent(c, "health_status: "+current)
	}
}

// Run the container's monitoring thread until notified via "stop".
// There is never more than one monitor thread running per container at a time.
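// The probe interval is measured from the completion of one probe run to the
// start of the next: the interval timer is reset only after the previous
// result has been handled.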
func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) {
	probeInterval := timeoutWithDefault(c.Config.Healthcheck.Interval, defaultProbeInterval)

	intervalTimer := time.NewTimer(probeInterval)
	defer intervalTimer.Stop()

	for {
		intervalTimer.Reset(probeInterval)

		select {
		case <-stop:
			logrus.Debugf("Stop healthcheck monitoring for container %s (received while idle)", c.ID)
			return
		case <-intervalTimer.C:
			logrus.Debugf("Running health check for container %s ...", c.ID)
			startTime := time.Now()
			ctx, cancelProbe := context.WithCancel(context.Background())
			results := make(chan *types.HealthcheckResult, 1)
			go func() {
				healthChecksCounter.Inc()
				result, err := probe.run(ctx, d, c)
				if err != nil {
					healthChecksFailedCounter.Inc()
					logrus.Warnf("Health check for container %s error: %v", c.ID, err)
					results <- &types.HealthcheckResult{
						ExitCode: -1,
						Output:   err.Error(),
						Start:    startTime,
						End:      time.Now(),
					}
				} else {
					result.Start = startTime
					logrus.Debugf("Health check for container %s done (exitCode=%d)", c.ID, result.ExitCode)
					results <- result
				}
				close(results)
			}()
			select {
			case <-stop:
				logrus.Debugf("Stop healthcheck monitoring for container %s (received while probing)", c.ID)
				cancelProbe()
				// Wait for probe to exit (it might take a while to respond to the TERM
				// signal and we don't want dying probes to pile up).
				<-results
				return
			case result := <-results:
				handleProbeResult(d, c, result, stop)
				cancelProbe()
			}
		}
	}
}

// Get a suitable probe implementation for the container's healthcheck configuration.
// Nil will be returned if no healthcheck was configured or NONE was set.
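// For reference, the recognized forms of Config.Healthcheck.Test are:
//
//	{"NONE"}                                     // healthcheck explicitly disabled
//	{"CMD", "curl", "-f", "http://localhost/"}   // exec the arguments directly
//	{"CMD-SHELL", "curl -f http://localhost/"}   // run the command in the default shell
//
// (curl is used here only as an example probe.)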
func getProbe(c *container.Container) probe {
	config := c.Config.Healthcheck
	if config == nil || len(config.Test) == 0 {
		return nil
	}
	switch config.Test[0] {
	case "CMD":
		return &cmdProbe{shell: false}
	case "CMD-SHELL":
		return &cmdProbe{shell: true}
	case "NONE":
		return nil
	default:
		logrus.Warnf("Unknown healthcheck type '%s' (expected 'CMD', 'CMD-SHELL' or 'NONE') in container %s", config.Test[0], c.ID)
		return nil
	}
}

// Ensure the health-check monitor is running or not, depending on the current
// state of the container.
// Called from monitor.go, with c locked.
func (daemon *Daemon) updateHealthMonitor(c *container.Container) {
	h := c.State.Health
	if h == nil {
		return // No healthcheck configured
	}

	probe := getProbe(c)
	wantRunning := c.Running && !c.Paused && probe != nil
	if wantRunning {
		if stop := h.OpenMonitorChannel(); stop != nil {
			go monitor(daemon, c, stop, probe)
		}
	} else {
		h.CloseMonitorChannel()
	}
}

// Reset the health state for a newly-started, restarted or restored container.
// initHealthMonitor is called from monitor.go and we should never be running
// two instances at once.
// Called with c locked.
func (daemon *Daemon) initHealthMonitor(c *container.Container) {
	// If no healthcheck is set up then don't init the monitor
	if getProbe(c) == nil {
		return
	}

	// This is needed in case we're auto-restarting
	daemon.stopHealthchecks(c)

	if h := c.State.Health; h != nil {
		h.SetStatus(types.Starting)
		h.FailingStreak = 0
	} else {
		h := &container.Health{}
		h.SetStatus(types.Starting)
		c.State.Health = h
	}

	daemon.updateHealthMonitor(c)
}

// Called when the container is being stopped (whether because the health check is
// failing or for any other reason).
func (daemon *Daemon) stopHealthchecks(c *container.Container) {
	h := c.State.Health
	if h != nil {
		h.CloseMonitorChannel()
	}
}

// Buffer up to maxOutputLen bytes. Further data is discarded.
type limitedBuffer struct {
	buf       bytes.Buffer
	mu        sync.Mutex
	truncated bool // indicates that data has been lost
}

// Append to limitedBuffer while there is room.
func (b *limitedBuffer) Write(data []byte) (int, error) {
	b.mu.Lock()
	defer b.mu.Unlock()

	bufLen := b.buf.Len()
	dataLen := len(data)
	keep := min(maxOutputLen-bufLen, dataLen)
	if keep > 0 {
		b.buf.Write(data[:keep])
	}
	if keep < dataLen {
		b.truncated = true
	}
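	// Report the full input length so callers treat the write as successful;
	// overflow is recorded in b.truncated rather than surfaced as an error.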
	return dataLen, nil
}

// The contents of the buffer, with "..." appended if it overflowed.
func (b *limitedBuffer) String() string {
	b.mu.Lock()
	defer b.mu.Unlock()

	out := b.buf.String()
	if b.truncated {
		out = out + "..."
	}
	return out
}

// If configuredValue is zero, use defaultValue instead.
func timeoutWithDefault(configuredValue time.Duration, defaultValue time.Duration) time.Duration {
	if configuredValue == 0 {
		return defaultValue
	}
	return configuredValue
}

func min(x, y int) int {
	if x < y {
		return x
	}
	return y
}
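
// getShell returns the shell used to run CMD-SHELL probes: the container's
// configured Shell when set, otherwise "/bin/sh -c" for Linux containers
// (including Linux containers on a Windows daemon) and "cmd /S /C" for
// Windows containers.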
func getShell(cntr *container.Container) []string {
	if len(cntr.Config.Shell) != 0 {
		return cntr.Config.Shell
	}
	if runtime.GOOS != "windows" {
		return []string{"/bin/sh", "-c"}
	}
	if cntr.OS != runtime.GOOS {
		return []string{"/bin/sh", "-c"}
	}
	return []string{"cmd", "/S", "/C"}
}