daemon/monitor.go


package daemon // import "github.com/docker/docker/daemon"

import (
	"context"
	"strconv"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/container"
	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
	"github.com/docker/docker/restartmanager"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)
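
// setStateCounter updates the stateCtr counter with the container's current
// state: paused, running, or stopped (the default for any other state).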
func (daemon *Daemon) setStateCounter(c *container.Container) {
	switch c.StateString() {
	case "paused":
		stateCtr.set(c.ID, "paused")
	case "running":
		stateCtr.set(c.ID, "running")
	default:
		stateCtr.set(c.ID, "stopped")
	}
}
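
// handleContainerExit handles the exit of a container's main process. It
// collects the exit status by deleting the task from containerd, so the
// container can be marked as stopped even when the exit event itself was
// never delivered (for example when the task had already died before a
// `docker stop`). It then stops health checks, records the "die" event, and
// either schedules a restart or marks the container stopped and auto-removes
// it if configured.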
func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontainerdtypes.EventInfo) error {
	c.Lock()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	ec, et, err := daemon.containerd.DeleteTask(ctx, c.ID)
	cancel()
	if err != nil {
		logrus.WithError(err).WithField("container", c.ID).Warnf("failed to delete container from containerd")
	}

	ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second)
	c.StreamConfig.Wait(ctx)
	cancel()
	c.Reset(false)

	exitStatus := container.ExitStatus{
		ExitCode: int(ec),
		ExitedAt: et,
	}
	if e != nil {
		exitStatus.ExitCode = int(e.ExitCode)
		exitStatus.ExitedAt = e.ExitedAt
		exitStatus.OOMKilled = e.OOMKilled
		if e.Error != nil {
			c.SetError(e.Error)
		}
	}
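
	// Decide whether the container should be restarted, taking the daemon's
	// shutdown state and any manual stop into account.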
	daemonShutdown := daemon.IsShuttingDown()
	execDuration := time.Since(c.StartedAt)
	restart, wait, err := c.RestartManager().ShouldRestart(ec, daemonShutdown || c.HasBeenManuallyStopped, execDuration)
	if err != nil {
		logrus.WithError(err).
			WithField("container", c.ID).
			WithField("restartCount", c.RestartCount).
			WithField("exitStatus", exitStatus).
			WithField("daemonShuttingDown", daemonShutdown).
			WithField("hasBeenManuallyStopped", c.HasBeenManuallyStopped).
			WithField("execDuration", execDuration).
			Warn("ShouldRestart failed, container will not be restarted")
		restart = false
	}
	// Cancel health checks here; they will be restarted automatically
	// if/when the container is started again.
	daemon.stopHealthchecks(c)

	attributes := map[string]string{
		"exitCode": strconv.Itoa(int(ec)),
	}

	daemon.Cleanup(c)

	if restart {
		c.RestartCount++
		logrus.WithField("container", c.ID).
			WithField("restartCount", c.RestartCount).
			WithField("exitStatus", exitStatus).
			WithField("manualRestart", c.HasBeenManuallyRestarted).
			Debug("Restarting container")
		c.SetRestarting(&exitStatus)
	} else {
		c.SetStopped(&exitStatus)
		if !c.HasBeenManuallyRestarted {
			defer daemon.autoRemove(c)
		}
	}

	defer c.Unlock() // needs to be called before autoRemove
	daemon.setStateCounter(c)
	cpErr := c.CheckpointTo(daemon.containersReplica)
	daemon.LogContainerEventWithAttributes(c, "die", attributes)

	if restart {
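		// Perform the restart in the background: wait for the restart
		// manager to give the go-ahead (or cancel the restart), then start
		// the container again.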
		go func() {
			err := <-wait
			if err == nil {
				// daemon.netController is initialized while the daemon is restoring containers,
				// but containerStart needs daemon.netController. To avoid a panic during the
				// startup process, wait here until the daemon restore is done.
				daemon.waitForStartupDone()
				if err = daemon.containerStart(c, "", "", false); err != nil {
					logrus.Debugf("failed to restart container: %+v", err)
				}
			}
			if err != nil {
				c.Lock()
				c.SetStopped(&exitStatus)
				daemon.setStateCounter(c)
				c.CheckpointTo(daemon.containersReplica)
				c.Unlock()
				defer daemon.autoRemove(c)
				if err != restartmanager.ErrRestartCanceled {
					logrus.Errorf("restartmanager wait error: %+v", err)
				}
			}
		}()
	}

	return cpErr
}

// ProcessEvent is called by libcontainerd whenever an event occurs
func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) error {
	c, err := daemon.GetContainer(id)
	if err != nil {
		return errors.Wrapf(err, "could not find container %s", id)
	}

	switch e {
	case libcontainerdtypes.EventOOM:
		// StateOOM is Linux specific and should never be hit on Windows
		if isWindows {
			return errors.New("received StateOOM from libcontainerd on Windows. This should never happen")
		}
		c.Lock()
		defer c.Unlock()
		daemon.updateHealthMonitor(c)
		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
			return err
		}
		daemon.LogContainerEvent(c, "oom")
	case libcontainerdtypes.EventExit:
		if int(ei.Pid) == c.Pid {
			return daemon.handleContainerExit(c, &ei)
		}
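
		// The exit did not come from the container's main process, so it
		// belongs to an exec'd process: record its exit code, close its
		// streams, and emit an exec_die event.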
		exitCode := 127
		if execConfig := c.ExecCommands.Get(ei.ProcessID); execConfig != nil {
			ec := int(ei.ExitCode)
			execConfig.Lock()
			defer execConfig.Unlock()
			execConfig.ExitCode = &ec
			execConfig.Running = false

			ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
			execConfig.StreamConfig.Wait(ctx)
			cancel()

			if err := execConfig.CloseStreams(); err != nil {
				logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err)
			}

			// remove the exec command from the container's store only and not the
			// daemon's store so that the exec command can be inspected.
			c.ExecCommands.Delete(execConfig.ID, execConfig.Pid)

			exitCode = ec
		}
		attributes := map[string]string{
			"execID":   ei.ProcessID,
			"exitCode": strconv.Itoa(exitCode),
		}
		daemon.LogContainerEventWithAttributes(c, "exec_die", attributes)
	case libcontainerdtypes.EventStart:
		c.Lock()
		defer c.Unlock()

		// This is here to handle starts that were not initiated by docker.
		if !c.Running {
			c.SetRunning(int(ei.Pid), false)
			c.HasBeenManuallyStopped = false
			c.HasBeenStartedBefore = true
			daemon.setStateCounter(c)
			daemon.initHealthMonitor(c)
			if err := c.CheckpointTo(daemon.containersReplica); err != nil {
				return err
			}
			daemon.LogContainerEvent(c, "start")
		}
	case libcontainerdtypes.EventPaused:
		c.Lock()
		defer c.Unlock()

		if !c.Paused {
			c.Paused = true
			daemon.setStateCounter(c)
			daemon.updateHealthMonitor(c)
			if err := c.CheckpointTo(daemon.containersReplica); err != nil {
				return err
			}
			daemon.LogContainerEvent(c, "pause")
		}
	case libcontainerdtypes.EventResumed:
		c.Lock()
		defer c.Unlock()

		if c.Paused {
			c.Paused = false
			daemon.setStateCounter(c)
			daemon.updateHealthMonitor(c)
			if err := c.CheckpointTo(daemon.containersReplica); err != nil {
				return err
			}
			daemon.LogContainerEvent(c, "unpause")
		}
	}

	return nil
}
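
// autoRemove force-removes the container, together with its anonymous
// volumes, if it was created with AutoRemove enabled. Removal failures are
// logged unless the container has already been removed.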
func (daemon *Daemon) autoRemove(c *container.Container) {
	c.Lock()
	ar := c.HostConfig.AutoRemove
	c.Unlock()
	if !ar {
		return
	}

	err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true})
	if err == nil {
		return
	}
	if c := daemon.containers.Get(c.ID); c == nil {
		return
	}

	logrus.WithError(err).WithField("container", c.ID).Error("error removing container")
}