moby/moby: builder/dockerfile/builder.go

package dockerfile

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/reference"
	apierrors "github.com/docker/docker/api/errors"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/dockerfile/command"
	"github.com/docker/docker/builder/dockerfile/parser"
	"github.com/docker/docker/image"
	"github.com/docker/docker/pkg/stringid"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
)

var validCommitCommands = map[string]bool{
"cmd": true,
"entrypoint": true,
"healthcheck": true,
"env": true,
"expose": true,
"label": true,
"onbuild": true,
"user": true,
"volume": true,
"workdir": true,
}
var defaultLogConfig = container.LogConfig{Type: "none"}

// Builder is a Dockerfile builder.
// It drives a single build, making calls into the daemon through the
// builder.Backend interface.
type Builder struct {
	options *types.ImageBuildOptions
	Stdout  io.Writer
	Stderr  io.Writer
	Output  io.Writer

	docker    builder.Backend
	context   builder.Context
	clientCtx context.Context

	runConfig     *container.Config // runconfig for cmd, run, entrypoint etc.
	flags         *BFlags
	tmpContainers map[string]struct{}
	image         string         // imageID
	imageContexts *imageContexts // helper for storing contexts from builds
	noBaseImage   bool           // A flag to track the use of `scratch` as the base image
	maintainer    string
	cmdSet        bool
	disableCommit bool
	cacheBusted   bool
	buildArgs     *buildArgs
	directive     *parser.Directive

	imageCache builder.ImageCache
	from       builder.Image
}

// BuildManager implements builder.Backend and is shared across all Builder objects.
type BuildManager struct {
	backend   builder.Backend
	pathCache *pathCache // TODO: make this persistent
}

// NewBuildManager creates a BuildManager.
func NewBuildManager(b builder.Backend) (bm *BuildManager) {
	return &BuildManager{backend: b, pathCache: &pathCache{}}
}

// BuildFromContext builds a new image from a given context.
func (bm *BuildManager) BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) {
	if buildOptions.Squash && !bm.backend.HasExperimental() {
		return "", apierrors.NewBadRequestError(errors.New("squash is only supported with experimental mode"))
	}
	buildContext, dockerfileName, err := builder.DetectContextFromRemoteURL(src, remote, pg.ProgressReaderFunc)
	if err != nil {
		return "", err
	}
	defer func() {
		if err := buildContext.Close(); err != nil {
			logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err)
		}
	}()

	if len(dockerfileName) > 0 {
		buildOptions.Dockerfile = dockerfileName
	}
	b, err := NewBuilder(ctx, buildOptions, bm.backend, builder.DockerIgnoreContext{ModifiableContext: buildContext})
	if err != nil {
		return "", err
	}
	b.imageContexts.cache = bm.pathCache
	return b.build(pg.StdoutFormatter, pg.StderrFormatter, pg.Output)
}

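// A hedged usage sketch, not part of the original file: the API layer is
// assumed to call BuildFromContext with the request body as the build
// context. The names daemonBackend, httpBody, remoteURL, opts, and pw are
// hypothetical.
//
//	bm := NewBuildManager(daemonBackend)
//	imageID, err := bm.BuildFromContext(ctx, httpBody, remoteURL, opts, pw)
//	if err != nil {
//		return err // surface the build failure to the client
//	}
//	logrus.Debugf("built %s", imageID)
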
// NewBuilder creates a new Dockerfile builder from an optional build context and a config.
// If config is nil, an empty ImageBuildOptions is used. The Dockerfile named by
// config.Dockerfile is read from the build context when the build runs.
func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, backend builder.Backend, buildContext builder.Context) (b *Builder, err error) {
	if config == nil {
		config = new(types.ImageBuildOptions)
	}
	b = &Builder{
		clientCtx:     clientCtx,
		options:       config,
		Stdout:        os.Stdout,
		Stderr:        os.Stderr,
		docker:        backend,
		context:       buildContext,
		runConfig:     new(container.Config),
		tmpContainers: map[string]struct{}{},
		buildArgs:     newBuildArgs(config.BuildArgs),
		directive:     parser.NewDefaultDirective(),
	}
	b.imageContexts = &imageContexts{b: b}
	return b, nil
}

func (b *Builder) resetImageCache() {
	if icb, ok := b.docker.(builder.ImageCacheBuilder); ok {
		b.imageCache = icb.MakeImageCache(b.options.CacheFrom)
	}
	b.noBaseImage = false
	b.cacheBusted = false
}

// sanitizeRepoAndTags parses the raw "t" parameter received from the client
// into a slice of tagged references.
// It also validates each repoName and tag.
func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
	var (
		repoAndTags []reference.Named
		// This map is used for deduplicating the "-t" parameter.
		uniqNames = make(map[string]struct{})
	)
	for _, repo := range names {
		if repo == "" {
			continue
		}

		ref, err := reference.ParseNormalizedNamed(repo)
		if err != nil {
			return nil, err
		}

		if _, isCanonical := ref.(reference.Canonical); isCanonical {
			return nil, errors.New("build tag cannot contain a digest")
		}

		ref = reference.TagNameOnly(ref)

		nameWithTag := ref.String()

		if _, exists := uniqNames[nameWithTag]; !exists {
			uniqNames[nameWithTag] = struct{}{}
			repoAndTags = append(repoAndTags, ref)
		}
	}
	return repoAndTags, nil
}

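// A hedged illustration, not part of the original file: normalization makes
// bare names fully qualified and tagged, which is also what deduplicates
// equivalent "-t" values.
//
//	refs, _ := sanitizeRepoAndTags([]string{"app", "app:latest", "registry.example.com/app:v1"})
//	// refs contains "docker.io/library/app:latest" once, plus
//	// "registry.example.com/app:v1"; a digest reference such as
//	// "app@sha256:..." would instead return an error.
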
// build runs the Dockerfile builder from a context and a docker object that
// allows it to make calls to Docker.
func (b *Builder) build(stdout io.Writer, stderr io.Writer, out io.Writer) (string, error) {
	defer b.imageContexts.unmount()

	b.Stdout = stdout
	b.Stderr = stderr
	b.Output = out

	dockerfile, err := b.readAndParseDockerfile()
	if err != nil {
		return "", err
	}

	repoAndTags, err := sanitizeRepoAndTags(b.options.Tags)
	if err != nil {
		return "", err
	}

	addNodesForLabelOption(dockerfile, b.options.Labels)

	if err := checkDispatchDockerfile(dockerfile); err != nil {
		return "", err
	}

	shortImageID, err := b.dispatchDockerfileWithCancellation(dockerfile)
	if err != nil {
		return "", err
	}

	b.warnOnUnusedBuildArgs()

	if b.image == "" {
		return "", errors.New("No image was generated. Is your Dockerfile empty?")
	}

	if b.options.Squash {
		if err := b.squashBuild(); err != nil {
			return "", err
		}
	}

	fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImageID)
	if err := b.tagImages(repoAndTags); err != nil {
		return "", err
	}
	return b.image, nil
}

func (b *Builder) dispatchDockerfileWithCancellation(dockerfile *parser.Node) (string, error) {
	total := len(dockerfile.Children)
	var shortImgID string
	for i, n := range dockerfile.Children {
		select {
		case <-b.clientCtx.Done():
			logrus.Debug("Builder: build cancelled!")
			fmt.Fprint(b.Stdout, "Build cancelled")
			return "", errors.New("Build cancelled")
		default:
			// Not cancelled yet, keep going...
		}

		if command.From == n.Value && b.imageContexts.isCurrentTarget(b.options.Target) {
			break
		}

		if err := b.dispatch(i, total, n); err != nil {
			if b.options.ForceRemove {
				b.clearTmp()
			}
			return "", err
		}

		shortImgID = stringid.TruncateID(b.image)
		fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID)
		if b.options.Remove {
			b.clearTmp()
		}
	}

	if b.options.Target != "" && !b.imageContexts.isCurrentTarget(b.options.Target) {
		return "", errors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target)
	}

	return shortImgID, nil
}

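// A hedged caller sketch, not part of the original file: because the loop
// above checks b.clientCtx between instructions, cancelling the context
// passed to NewBuilder aborts the build at the next instruction boundary.
// Here clientGone, options, backend, and buildContext are hypothetical names.
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	go func() {
//		<-clientGone // e.g. the HTTP client disconnected
//		cancel()
//	}()
//	b, err := NewBuilder(ctx, options, backend, buildContext)
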
func (b *Builder) squashBuild() error {
	var fromID string
	var err error
	if b.from != nil {
		fromID = b.from.ImageID()
	}
	b.image, err = b.docker.SquashImage(b.image, fromID)
	if err != nil {
		return errors.Wrap(err, "error squashing image")
	}
	return nil
}

func addNodesForLabelOption(dockerfile *parser.Node, labels map[string]string) {
	if len(labels) == 0 {
		return
	}

	node := parser.NodeFromLabels(labels)
	dockerfile.Children = append(dockerfile.Children, node)
}

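// A hedged illustration, not part of the original file: --label options are
// appended to the parsed AST as a single trailing LABEL instruction, so
//
//	addNodesForLabelOption(dockerfile, map[string]string{"maintainer": "me"})
//
// behaves as if the Dockerfile ended with `LABEL "maintainer"="me"`.
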
// warnOnUnusedBuildArgs checks whether there are any leftover build-args
// that were passed but not consumed during the build, and prints a warning
// if there are.
func (b *Builder) warnOnUnusedBuildArgs() {
	leftoverArgs := b.buildArgs.UnreferencedOptionArgs()
	if len(leftoverArgs) > 0 {
		fmt.Fprintf(b.Stderr, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs)
	}
}

func (b *Builder) tagImages(repoAndTags []reference.Named) error {
	imageID := image.ID(b.image)
	for _, rt := range repoAndTags {
		if err := b.docker.TagImageWithReference(imageID, rt); err != nil {
			return err
		}
		fmt.Fprintf(b.Stdout, "Successfully tagged %s\n", reference.FamiliarString(rt))
	}
	return nil
}

// hasFromImage returns true if the builder has processed a `FROM <image>` line
func (b *Builder) hasFromImage() bool {
	return b.image != "" || b.noBaseImage
}

// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile.
// It will:
// - Call parser.Parse() to get an AST root for the concatenated Dockerfile entries.
// - Run the build by calling builder.dispatch() on each entry's handling routine.
//
// BuildFromConfig is used by the /commit endpoint, with the changes
// coming from the query parameter of the same name.
//
// TODO: Remove?
func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) {
	b, err := NewBuilder(context.Background(), nil, nil, nil)
	if err != nil {
		return nil, err
	}

	ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")), b.directive)
	if err != nil {
		return nil, err
	}

	// ensure that the commands are valid
	for _, n := range ast.Children {
		if !validCommitCommands[n.Value] {
			return nil, fmt.Errorf("%s is not a valid change command", n.Value)
		}
	}

	b.runConfig = config
	b.Stdout = ioutil.Discard
	b.Stderr = ioutil.Discard
	b.disableCommit = true

	if err := checkDispatchDockerfile(ast); err != nil {
		return nil, err
	}

	if err := dispatchFromDockerfile(b, ast); err != nil {
		return nil, err
	}
	return b.runConfig, nil
}

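// A hedged usage sketch, not part of the original file: this is the path
// behind `docker commit --change`, so a caller might apply changes such as
//
//	cfg, err := BuildFromConfig(ctr.Config, []string{
//		`CMD ["nginx", "-g", "daemon off;"]`,
//		"EXPOSE 80",
//	})
//
// where ctr.Config is the committed container's config (a hypothetical
// variable). Only instructions whitelisted in validCommitCommands are
// accepted; anything else fails with "is not a valid change command".
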
func checkDispatchDockerfile(dockerfile *parser.Node) error {
	for _, n := range dockerfile.Children {
		if err := checkDispatch(n); err != nil {
			return errors.Wrapf(err, "Dockerfile parse error line %d", n.StartLine)
		}
	}
	return nil
}

func dispatchFromDockerfile(b *Builder, ast *parser.Node) error {
	total := len(ast.Children)
	for i, n := range ast.Children {
		if err := b.dispatch(i, total, n); err != nil {
			return err
		}
	}
	return nil
}