moby--moby/daemon/commit.go

package daemon

import (
	"encoding/json"
	"fmt"
	"runtime"
	"strings"
	"time"

	"github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/builder/dockerfile"
	"github.com/docker/docker/container"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/reference"
	containertypes "github.com/docker/engine-api/types/container"
	"github.com/docker/go-connections/nat"
)

// merge merges two Configs: the image container configuration (default
// values) and the user container configuration, either passed by the API or
// generated by the CLI.
// It will mutate the specified user configuration (userConf) with the image
// configuration where the user configuration is incomplete.
func merge(userConf, imageConf *containertypes.Config) error {
	if userConf.User == "" {
		userConf.User = imageConf.User
	}
	if len(userConf.ExposedPorts) == 0 {
		userConf.ExposedPorts = imageConf.ExposedPorts
	} else if imageConf.ExposedPorts != nil {
		if userConf.ExposedPorts == nil {
			userConf.ExposedPorts = make(nat.PortSet)
		}
		for port := range imageConf.ExposedPorts {
			if _, exists := userConf.ExposedPorts[port]; !exists {
				userConf.ExposedPorts[port] = struct{}{}
			}
		}
	}
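
	// Merge environment variables: keep every user-supplied variable and
	// append image variables whose keys the user did not set. For example,
	// image Env ["A=1", "B=2"] merged with user Env ["B=3"] yields
	// ["B=3", "A=1"].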
	if len(userConf.Env) == 0 {
		userConf.Env = imageConf.Env
	} else {
		for _, imageEnv := range imageConf.Env {
			found := false
			imageEnvKey := strings.Split(imageEnv, "=")[0]
			for _, userEnv := range userConf.Env {
				userEnvKey := strings.Split(userEnv, "=")[0]
				if imageEnvKey == userEnvKey {
					found = true
					break
				}
			}
			if !found {
				userConf.Env = append(userConf.Env, imageEnv)
			}
		}
	}
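
	// Merge labels: start from the image's labels and let user-supplied
	// labels win on key collisions.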
	if userConf.Labels == nil {
		userConf.Labels = map[string]string{}
	}
	if imageConf.Labels != nil {
		for l := range userConf.Labels {
			imageConf.Labels[l] = userConf.Labels[l]
		}
		userConf.Labels = imageConf.Labels
	}

	if len(userConf.Entrypoint) == 0 {
		if len(userConf.Cmd) == 0 {
			userConf.Cmd = imageConf.Cmd
		}

		if userConf.Entrypoint == nil {
			userConf.Entrypoint = imageConf.Entrypoint
		}
	}
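
	// Merge healthcheck settings: inherit the image's probe wholesale when
	// the user did not define one, otherwise fill in only the fields the user
	// left at their zero value.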
	if imageConf.Healthcheck != nil {
		if userConf.Healthcheck == nil {
			userConf.Healthcheck = imageConf.Healthcheck
		} else {
			if len(userConf.Healthcheck.Test) == 0 {
				userConf.Healthcheck.Test = imageConf.Healthcheck.Test
			}
			if userConf.Healthcheck.Interval == 0 {
				userConf.Healthcheck.Interval = imageConf.Healthcheck.Interval
			}
			if userConf.Healthcheck.Timeout == 0 {
				userConf.Healthcheck.Timeout = imageConf.Healthcheck.Timeout
			}
			if userConf.Healthcheck.Retries == 0 {
				userConf.Healthcheck.Retries = imageConf.Healthcheck.Retries
			}
		}
	}

	if userConf.WorkingDir == "" {
		userConf.WorkingDir = imageConf.WorkingDir
	}
	if len(userConf.Volumes) == 0 {
		userConf.Volumes = imageConf.Volumes
	} else {
		for k, v := range imageConf.Volumes {
			userConf.Volumes[k] = v
		}
	}

	if userConf.StopSignal == "" {
		userConf.StopSignal = imageConf.StopSignal
	}
	return nil
}

// Commit creates a new filesystem image from the current state of a container.
// The image can optionally be tagged into a repository.
func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (string, error) {
	container, err := daemon.GetContainer(name)
	if err != nil {
		return "", err
	}

	// It is not possible to commit a running container on Windows
	if runtime.GOOS == "windows" && container.IsRunning() {
		return "", fmt.Errorf("Windows does not support commit of a running container")
	}
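
	// Pause the container (when requested and not already paused) so its
	// filesystem stays consistent while it is exported; it is unpaused again
	// when Commit returns.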
	if c.Pause && !container.IsPaused() {
		daemon.containerPause(container)
		defer daemon.containerUnpause(container)
	}

	newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes)
	if err != nil {
		return "", err
	}

	if c.MergeConfigs {
		if err := merge(newConfig, container.Config); err != nil {
			return "", err
		}
	}
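
	// Export the container's read-write layer as a tar stream; it becomes the
	// topmost layer of the committed image.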
	rwTar, err := daemon.exportContainerRw(container)
	if err != nil {
		return "", err
	}
	defer func() {
		if rwTar != nil {
			rwTar.Close()
		}
	}()

	var history []image.History
	rootFS := image.NewRootFS()
	osVersion := ""
	var osFeatures []string

	if container.ImageID != "" {
		img, err := daemon.imageStore.Get(container.ImageID)
		if err != nil {
			return "", err
		}
		history = img.History
		rootFS = img.RootFS
		osVersion = img.OSVersion
		osFeatures = img.OSFeatures
	}
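
	// Register the exported read-write layer on top of the parent image's
	// layer chain.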
	l, err := daemon.layerStore.Register(rwTar, rootFS.ChainID())
	if err != nil {
		return "", err
	}
	defer layer.ReleaseAndLog(daemon.layerStore, l)
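
	// Record a history entry for this commit; it only counts as a non-empty
	// layer when the exported tar actually contains changes.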
	h := image.History{
		Author:     c.Author,
		Created:    time.Now().UTC(),
		CreatedBy:  strings.Join(container.Config.Cmd, " "),
		Comment:    c.Comment,
		EmptyLayer: true,
	}

	if diffID := l.DiffID(); layer.DigestSHA256EmptyTar != diffID {
		h.EmptyLayer = false
		rootFS.Append(diffID)
	}

	history = append(history, h)
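
	// Serialize the new image definition, reusing the parent image's history
	// and platform metadata, and store it in the image store.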
	config, err := json.Marshal(&image.Image{
		V1Image: image.V1Image{
			DockerVersion:   dockerversion.Version,
			Config:          newConfig,
			Architecture:    runtime.GOARCH,
			OS:              runtime.GOOS,
			Container:       container.ID,
			ContainerConfig: *container.Config,
			Author:          c.Author,
			Created:         h.Created,
		},
		RootFS:     rootFS,
		History:    history,
		OSFeatures: osFeatures,
		OSVersion:  osVersion,
	})
	if err != nil {
		return "", err
	}

	id, err := daemon.imageStore.Create(config)
	if err != nil {
		return "", err
	}

	if container.ImageID != "" {
		if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil {
			return "", err
		}
	}
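
	// Optionally tag the committed image into the requested repository.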
	if c.Repo != "" {
		newTag, err := reference.WithName(c.Repo) // todo: should move this to API layer
		if err != nil {
			return "", err
		}
		if c.Tag != "" {
			if newTag, err = reference.WithTag(newTag, c.Tag); err != nil {
				return "", err
			}
		}
		if err := daemon.TagImageWithReference(id, newTag); err != nil {
			return "", err
		}
	}

	attributes := map[string]string{
		"comment": c.Comment,
	}
	daemon.LogContainerEventWithAttributes(container, "commit", attributes)
	return id.String(), nil
}
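
// exportContainerRw mounts the container and returns a tar stream of its
// read-write layer. Closing the returned stream also unmounts the layer.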
func (daemon *Daemon) exportContainerRw(container *container.Container) (archive.Archive, error) {
	if err := daemon.Mount(container); err != nil {
		return nil, err
	}

	archive, err := container.RWLayer.TarStream()
	if err != nil {
		daemon.Unmount(container) // logging is already handled in the `Unmount` function
		return nil, err
	}
	return ioutils.NewReadCloserWrapper(archive, func() error {
			archive.Close()
			return container.RWLayer.Unmount()
		}),
		nil
}