package dockerfile

// internals for handling commands. Covers many areas and a lot of
// non-contiguous functionality. Please read the comments.

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/dockerfile/parser"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/httputils"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/tarsum"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/docker/docker/runconfig/opts"
	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/container"
	"github.com/docker/engine-api/types/strslice"
)

func (b *Builder) addLabels() {
	// merge labels
	if len(b.options.Labels) > 0 {
		logrus.Debugf("[BUILDER] setting labels %v", b.options.Labels)
		if b.runConfig.Labels == nil {
			b.runConfig.Labels = make(map[string]string)
		}
		for kL, vL := range b.options.Labels {
			b.runConfig.Labels[kL] = vL
		}
	}
}

func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) error {
	if b.disableCommit {
		return nil
	}
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.runConfig.Image = b.image

	if id == "" {
		cmd := b.runConfig.Cmd
		if runtime.GOOS != "windows" {
			b.runConfig.Cmd = strslice.StrSlice{"/bin/sh", "-c", "#(nop) " + comment}
		} else {
			b.runConfig.Cmd = strslice.StrSlice{"cmd", "/S /C", "REM (nop) " + comment}
		}
		defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		} else if hit {
			return nil
		}
		id, err = b.create()
		if err != nil {
			return err
		}
	}

	// Note: Actually copy the struct
	autoConfig := *b.runConfig
	autoConfig.Cmd = autoCmd

	commitCfg := &types.ContainerCommitConfig{
		Author: b.maintainer,
		Pause:  true,
		Config: &autoConfig,
	}

	// Commit the container
	imageID, err := b.docker.Commit(id, commitCfg)
	if err != nil {
		return err
	}

	b.image = imageID
	return nil
}

type copyInfo struct {
	builder.FileInfo
	decompress bool
}
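// runContextCommand implements the shared behaviour behind the context-copying
// instructions (ADD and COPY in practice; cmdName only feeds error messages
// and the generated cache/commit metadata). It resolves each source into
// copyInfos, downloading remote URLs when allowRemote is set, and derives a
// cache-probe command from the source hashes. On a cache miss it creates a
// temporary container, copies every source into dest (joined onto the working
// directory when not absolute) and commits the result. With multiple sources
// the cache key is collapsed into one hash, roughly (illustrative):
//
//	srcHash = "multi:" + hex(sha256(strings.Join(perSourceHashes, ",")))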
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	b.runConfig.Image = b.image

	var infos []copyInfo

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	var err error
	for _, orig := range args[0 : len(args)-1] {
		var fi builder.FileInfo
		decompress := allowLocalDecompression
		if urlutil.IsURL(orig) {
			if !allowRemote {
				return fmt.Errorf("Source can't be a URL for %s", cmdName)
			}
			fi, err = b.download(orig)
			if err != nil {
				return err
			}
			defer os.RemoveAll(filepath.Dir(fi.Path()))
			decompress = false
			infos = append(infos, copyInfo{fi, decompress})
			continue
		}
		// not a URL
		subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true)
		if err != nil {
			return err
		}

		infos = append(infos, subInfos...)
	}

	if len(infos) == 0 {
		return fmt.Errorf("No source files were specified")
	}

	if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one info then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(infos) == 1 {
		fi := infos[0].FileInfo
		origPaths = fi.Name()
		if hfi, ok := fi.(builder.Hashed); ok {
			srcHash = hfi.Hash()
		}
	} else {
		var hashs []string
		var origs []string
		for _, info := range infos {
			fi := info.FileInfo
			origs = append(origs, fi.Name())
			if hfi, ok := fi.(builder.Hashed); ok {
				hashs = append(hashs, hfi.Hash())
			}
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.runConfig.Cmd
	if runtime.GOOS != "windows" {
		b.runConfig.Cmd = strslice.StrSlice{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)}
	} else {
		b.runConfig.Cmd = strslice.StrSlice{"cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest)}
	}
	defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

	if hit, err := b.probeCache(); err != nil {
		return err
	} else if hit {
		return nil
	}

	container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig})
	if err != nil {
		return err
	}
	b.tmpContainers[container.ID] = struct{}{}

	comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)

	// Twiddle the destination when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !system.IsAbs(dest) {
		hasSlash := strings.HasSuffix(dest, string(os.PathSeparator))
		dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest)

		// Make sure we preserve any trailing slash
		if hasSlash {
			dest += string(os.PathSeparator)
		}
	}

	for _, info := range infos {
		if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil {
			return err
		}
	}

	return b.commit(container.ID, cmd, comment)
}
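// download fetches srcURL into a freshly created temporary directory and
// returns a HashedFileInfo for the resulting file. The file's name is taken
// from the last path segment of the URL, its modification time is set from
// the Last-Modified response header when present, and its hash is the tarsum
// of the downloaded content so that remote sources participate in build
// caching. The caller is responsible for removing the temporary directory.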
func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
	// get filename from URL
	u, err := url.Parse(srcURL)
	if err != nil {
		return
	}
	path := filepath.FromSlash(u.Path) // Ensure in platform semantics
	if strings.HasSuffix(path, string(os.PathSeparator)) {
		path = path[:len(path)-1]
	}
	parts := strings.Split(path, string(os.PathSeparator))
	filename := parts[len(parts)-1]
	if filename == "" {
		err = fmt.Errorf("cannot determine filename from url: %s", u)
		return
	}

	// Initiate the download
	resp, err := httputils.Download(srcURL)
	if err != nil {
		return
	}

	// Prepare file in a tmp dir
	tmpDir, err := ioutils.TempDir("", "docker-remote")
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			os.RemoveAll(tmpDir)
		}
	}()
	tmpFileName := filepath.Join(tmpDir, filename)
	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		return
	}

	stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter)
	progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true)
	progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")

	// Download and dump result to tmp file
	if _, err = io.Copy(tmpFile, progressReader); err != nil {
		tmpFile.Close()
		return
	}
	fmt.Fprintln(b.Stdout)

	// ignoring error because the file was already opened successfully
	tmpFileSt, err := tmpFile.Stat()
	if err != nil {
		return
	}
	tmpFile.Close()

	// Set the mtime to the Last-Modified header value if present
	// Otherwise just remove atime and mtime
	mTime := time.Time{}

	lastMod := resp.Header.Get("Last-Modified")
	if lastMod != "" {
		// If we can't parse it then just let it default to 'zero'
		// otherwise use the parsed time value
		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
			mTime = parsedMTime
		}
	}

	if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
		return
	}

	// Calc the checksum, even if we're using the cache
	r, err := archive.Tar(tmpFileName, archive.Uncompressed)
	if err != nil {
		return
	}
	tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
	if err != nil {
		return
	}
	if _, err = io.Copy(ioutil.Discard, tarSum); err != nil {
		return
	}
	hash := tarSum.Sum(nil)
	r.Close()
	return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil
}
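// calcCopyInfo resolves origPath inside the build context into one or more
// copyInfos. When allowWildcards is set and origPath contains glob
// characters, the context is walked and calcCopyInfo is re-invoked for every
// match (with wildcards disabled, so that names which themselves contain '*'
// are not expanded again). Plain files are hashed as "file:<hash>";
// directories are hashed as "dir:" followed by a SHA-256 over the sorted
// hashes of their contents, so a change anywhere in the tree changes the
// resulting hash.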
func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) {
	// Work in daemon-specific OS filepath semantics
	origPath = filepath.FromSlash(origPath)

	if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath) {
		var copyInfos []copyInfo
		if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info.Name() == "" {
				// Why are we doing this check?
				return nil
			}
			if match, _ := filepath.Match(origPath, path); !match {
				return nil
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false)
			if err != nil {
				return err
			}
			copyInfos = append(copyInfos, subInfos...)
			return nil
		}); err != nil {
			return nil, err
		}
		return copyInfos, nil
	}

	// Must be a dir or a file
	statPath, fi, err := b.context.Stat(origPath)
	if err != nil {
		return nil, err
	}

	copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}}

	hfi, handleHash := fi.(builder.Hashed)
	if !handleHash {
		return copyInfos, nil
	}

	// Deal with the single file case
	if !fi.IsDir() {
		hfi.SetHash("file:" + hfi.Hash())
		return copyInfos, nil
	}

	// Must be a dir
	var subfiles []string
	err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// we already checked handleHash above
		subfiles = append(subfiles, info.(builder.Hashed).Hash())
		return nil
	})
	if err != nil {
		return nil, err
	}

	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil)))

	return copyInfos, nil
}

func containsWildcards(name string) bool {
	for i := 0; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			return true
		}
	}
	return false
}
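// processImageFrom records the base image selected by FROM (or leaves it
// empty for scratch), seeds a default PATH when the platform defines one, and
// then replays any ONBUILD triggers stored in the base image's config. Each
// trigger is parsed and dispatched as if it were written in the current
// Dockerfile; chained ONBUILD as well as FROM and MAINTAINER triggers are
// rejected. For example (illustrative), a base image built with
//
//	ONBUILD COPY . /app
//
// causes "COPY . /app" to be replayed here before this Dockerfile's own
// instructions continue.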
func (b *Builder) processImageFrom(img builder.Image) error {
	if img != nil {
		b.image = img.ImageID()

		if img.RunConfig() != nil {
			b.runConfig = img.RunConfig()
		}
	}

	// Check to see if we have a default PATH, note that windows won't
	// have one as it's set by HCS
	if system.DefaultPathEnv != "" {
		// Convert the slice of strings that represent the current list
		// of env vars into a map so we can see if PATH is already set.
		// If it's not set then go ahead and give it our default value
		configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env)
		if _, ok := configEnv["PATH"]; !ok {
			b.runConfig.Env = append(b.runConfig.Env, "PATH="+system.DefaultPathEnv)
		}
	}

	if img == nil {
		// Typically this means they used "FROM scratch"
		return nil
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 {
		word := "trigger"
		if nTriggers > 1 {
			word = "triggers"
		}
		fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.runConfig.OnBuild
	b.runConfig.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for _, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}

// probeCache checks if `b.docker` implements builder.ImageCache and
// image-caching is enabled (i.e. `b.options.NoCache` is false).
// If so attempts to look up the current `b.image` and `b.runConfig` pair with `b.docker`.
// If an image is found, probeCache returns `(true, nil)`.
// If no image is found, it returns `(false, nil)`.
// If there is any error, it returns `(false, err)`.
func (b *Builder) probeCache() (bool, error) {
	c, ok := b.docker.(builder.ImageCache)
	if !ok || b.options.NoCache || b.cacheBusted {
		return false, nil
	}
	cache, err := c.GetCachedImageOnBuild(b.image, b.runConfig)
	if err != nil {
		return false, err
	}
	if len(cache) == 0 {
		logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd)
		b.cacheBusted = true
		return false, nil
	}
	fmt.Fprintf(b.Stdout, " ---> Using cache\n")
	logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd)
	b.image = string(cache)

	return true, nil
}

func (b *Builder) create() (string, error) {
	if b.image == "" && !b.noBaseImage {
		return "", fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.runConfig.Image = b.image

	resources := container.Resources{
		CgroupParent: b.options.CgroupParent,
		CPUShares:    b.options.CPUShares,
		CPUPeriod:    b.options.CPUPeriod,
		CPUQuota:     b.options.CPUQuota,
		CpusetCpus:   b.options.CPUSetCPUs,
		CpusetMems:   b.options.CPUSetMems,
		Memory:       b.options.Memory,
		MemorySwap:   b.options.MemorySwap,
		Ulimits:      b.options.Ulimits,
	}

	// TODO: why not embed a hostconfig in builder?
	hostConfig := &container.HostConfig{
		Isolation: b.options.Isolation,
		ShmSize:   b.options.ShmSize,
		Resources: resources,
	}

	config := *b.runConfig

	// Create the container
	c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{
		Config:     b.runConfig,
		HostConfig: hostConfig,
	})
	if err != nil {
		return "", err
	}
	for _, warning := range c.Warnings {
		fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning)
	}

	b.tmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID))

	// override the entry point that may have been picked up from the base image
	if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd); err != nil {
		return "", err
	}

	return c.ID, nil
}

func (b *Builder) run(cID string) (err error) {
	errCh := make(chan error)
	go func() {
		errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true)
	}()

	finished := make(chan struct{})
	defer close(finished)
	go func() {
		select {
		case <-b.cancelled:
			logrus.Debugln("Build cancelled, killing and removing container:", cID)
			b.docker.ContainerKill(cID, 0)
			b.removeContainer(cID)
		case <-finished:
		}
	}()

	if err := b.docker.ContainerStart(cID, nil); err != nil {
		return err
	}

	// Block on reading output from container, stop on err or chan closed
	if err := <-errCh; err != nil {
		return err
	}

	if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 {
		// TODO: change error type, because jsonmessage.JSONError assumes HTTP
		return &jsonmessage.JSONError{
			Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(b.runConfig.Cmd, " "), ret),
			Code:    ret,
		}
	}

	return nil
}

func (b *Builder) removeContainer(c string) error {
	rmConfig := &types.ContainerRmConfig{
		ForceRemove:  true,
		RemoveVolume: true,
	}
	if err := b.docker.ContainerRm(c, rmConfig); err != nil {
		fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
		return err
	}
	return nil
}
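// clearTmp removes the intermediate containers recorded in b.tmpContainers,
// stopping at the first container that cannot be removed (removeContainer
// has already reported that error on b.Stdout).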
func (b *Builder) clearTmp() {
	for c := range b.tmpContainers {
		if err := b.removeContainer(c); err != nil {
			return
		}
		delete(b.tmpContainers, c)
		fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c))
	}
}

// readDockerfile reads a Dockerfile from the current context.
func (b *Builder) readDockerfile() error {
	// If no -f was specified then look for 'Dockerfile'. If we can't find
	// that then look for 'dockerfile'. If neither are found then default
	// back to 'Dockerfile' and use that in the error message.
	if b.options.Dockerfile == "" {
		b.options.Dockerfile = builder.DefaultDockerfileName
		if _, _, err := b.context.Stat(b.options.Dockerfile); os.IsNotExist(err) {
			lowercase := strings.ToLower(b.options.Dockerfile)
			if _, _, err := b.context.Stat(lowercase); err == nil {
				b.options.Dockerfile = lowercase
			}
		}
	}

	f, err := b.context.Open(b.options.Dockerfile)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile)
		}
		return err
	}
	if f, ok := f.(*os.File); ok {
		// ignoring error because Open already succeeded
		fi, err := f.Stat()
		if err != nil {
			return fmt.Errorf("Unexpected error reading Dockerfile: %v", err)
		}
		if fi.Size() == 0 {
			return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile)
		}
	}
	b.dockerfile, err = parser.Parse(f)
	f.Close()
	if err != nil {
		return err
	}

	// After the Dockerfile has been parsed, we need to check the .dockerignore
	// file for either "Dockerfile" or ".dockerignore", and if either is
	// present then erase them from the build context. These files should never
	// have been sent from the client but we did send them to make sure that
	// we had the Dockerfile to actually parse, and then we also need the
	// .dockerignore file to know whether either file should be removed.
	// Note that this assumes the Dockerfile has been read into memory and
	// is now safe to be removed.
	if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok {
		dockerIgnore.Process([]string{b.options.Dockerfile})
	}

	return nil
}

// isBuildArgAllowed reports whether arg is one of the built-in build args or
// one of the user-defined ARGs recorded for this build.
func (b *Builder) isBuildArgAllowed(arg string) bool {
	if _, ok := BuiltinAllowedBuildArgs[arg]; ok {
		return true
	}
	if _, ok := b.allowedBuildArgs[arg]; ok {
		return true
	}
	return false
}
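// Illustrative note: BuiltinAllowedBuildArgs holds the args that are always
// permitted, while b.allowedBuildArgs collects the ARG names declared by the
// Dockerfile being built; both are populated outside this file. A Dockerfile
// such as
//
//	ARG APP_VERSION
//	RUN echo "$APP_VERSION"
//
// therefore makes APP_VERSION an allowed build arg by the time the RUN step
// is dispatched.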