2014-08-15 12:29:35 -04:00
|
|
|
package builder
|
2014-08-05 16:17:40 -04:00
|
|
|
|
2014-08-07 01:56:44 -04:00
|
|
|
// internals for handling commands. Covers many areas and a lot of
|
|
|
|
// non-contiguous functionality. Please read the comments.
|
|
|
|
|
2014-08-05 18:41:09 -04:00
|
|
|
import (
|
|
|
|
"crypto/sha256"
|
|
|
|
"encoding/hex"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
2014-10-22 14:16:42 -04:00
|
|
|
"net/http"
|
2014-08-05 18:41:09 -04:00
|
|
|
"net/url"
|
|
|
|
"os"
|
|
|
|
"path"
|
|
|
|
"path/filepath"
|
|
|
|
"sort"
|
|
|
|
"strings"
|
|
|
|
"syscall"
|
|
|
|
"time"
|
|
|
|
|
2014-10-24 13:12:35 -04:00
|
|
|
log "github.com/Sirupsen/logrus"
|
2014-10-13 16:14:35 -04:00
|
|
|
"github.com/docker/docker/builder/parser"
|
2014-08-05 18:41:09 -04:00
|
|
|
"github.com/docker/docker/daemon"
|
|
|
|
imagepkg "github.com/docker/docker/image"
|
2014-09-30 02:23:36 -04:00
|
|
|
"github.com/docker/docker/pkg/archive"
|
2014-10-29 15:06:51 -04:00
|
|
|
"github.com/docker/docker/pkg/chrootarchive"
|
2014-08-05 18:41:09 -04:00
|
|
|
"github.com/docker/docker/pkg/parsers"
|
|
|
|
"github.com/docker/docker/pkg/symlink"
|
|
|
|
"github.com/docker/docker/pkg/system"
|
|
|
|
"github.com/docker/docker/pkg/tarsum"
|
2014-11-24 18:47:42 -05:00
|
|
|
"github.com/docker/docker/pkg/urlutil"
|
2014-08-05 18:41:09 -04:00
|
|
|
"github.com/docker/docker/registry"
|
|
|
|
"github.com/docker/docker/utils"
|
|
|
|
)
|
|
|
|
|
2014-08-26 15:25:44 -04:00
|
|
|
func (b *Builder) readContext(context io.Reader) error {
|
2014-08-05 16:17:40 -04:00
|
|
|
tmpdirPath, err := ioutil.TempDir("", "docker-build")
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
decompressedStream, err := archive.DecompressStream(context)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2014-08-21 16:12:52 -04:00
|
|
|
if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2014-10-29 15:06:51 -04:00
|
|
|
|
|
|
|
if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
|
2014-08-05 16:17:40 -04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
b.contextPath = tmpdirPath
|
2014-08-05 18:41:09 -04:00
|
|
|
return nil
|
2014-08-05 16:17:40 -04:00
|
|
|
}
|
|
|
|
|
2014-08-26 15:25:44 -04:00
|
|
|
func (b *Builder) commit(id string, autoCmd []string, comment string) error {
|
2014-10-28 17:06:23 -04:00
|
|
|
if b.image == "" && !b.noBaseImage {
|
2014-08-05 16:17:40 -04:00
|
|
|
return fmt.Errorf("Please provide a source image with `from` prior to commit")
|
|
|
|
}
|
2014-08-11 11:44:31 -04:00
|
|
|
b.Config.Image = b.image
|
2014-08-05 16:17:40 -04:00
|
|
|
if id == "" {
|
2014-08-11 11:44:31 -04:00
|
|
|
cmd := b.Config.Cmd
|
|
|
|
b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
|
|
|
|
defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
|
2014-08-05 16:17:40 -04:00
|
|
|
|
|
|
|
hit, err := b.probeCache()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if hit {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-09-04 11:43:28 -04:00
|
|
|
container, err := b.create()
|
2014-08-05 16:17:40 -04:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
id = container.ID
|
|
|
|
|
|
|
|
if err := container.Mount(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer container.Unmount()
|
|
|
|
}
|
2014-08-26 15:25:44 -04:00
|
|
|
container := b.Daemon.Get(id)
|
2014-08-05 16:17:40 -04:00
|
|
|
if container == nil {
|
|
|
|
return fmt.Errorf("An error occured while creating the container")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Note: Actually copy the struct
|
2014-08-11 11:44:31 -04:00
|
|
|
autoConfig := *b.Config
|
2014-08-05 16:17:40 -04:00
|
|
|
autoConfig.Cmd = autoCmd
|
2014-08-30 07:34:09 -04:00
|
|
|
|
2014-08-05 16:17:40 -04:00
|
|
|
// Commit the container
|
2014-08-26 15:25:44 -04:00
|
|
|
image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
|
2014-08-05 16:17:40 -04:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
b.image = image.ID
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-09-16 12:58:20 -04:00
|
|
|
// copyInfo describes one source → destination transfer for an ADD/COPY
// instruction, plus the hash used for build-cache lookups.
type copyInfo struct {
	origPath   string // source path, relative to the build context (or a temp download)
	destPath   string // absolute destination path inside the container
	hash       string // cache key: "file:<sum>", "dir:<sha256>", or the raw URL/path
	decompress bool   // whether an archive source may be auto-untarred at the destination
	tmpDir     string // temp dir holding a downloaded remote file; removed by the caller when non-empty
}
|
|
|
|
|
2014-08-26 15:25:44 -04:00
|
|
|
// runContextCommand implements the shared machinery behind the ADD and
// COPY Dockerfile instructions: it resolves every source argument into a
// copyInfo (downloading remote URLs, expanding wildcards, hashing files
// and directories), consults the build cache, and — on a miss — creates
// a throwaway container, copies each source into it, and commits the
// result as a new layer.
//
// args holds one or more sources followed by the destination as its last
// element. allowRemote permits URL sources (ADD); allowDecompression
// permits auto-untarring archives (ADD). cmdName is used in messages.
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	dest := args[len(args)-1] // last one is always the dest

	copyInfos := []*copyInfo{}

	b.Config.Image = b.image

	// Remove any temp dirs created for remote downloads, whether or not
	// the copy succeeds.
	defer func() {
		for _, ci := range copyInfos {
			if ci.tmpDir != "" {
				os.RemoveAll(ci.tmpDir)
			}
		}
	}()

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	for _, orig := range args[0 : len(args)-1] {
		err := calcCopyInfo(b, cmdName, &copyInfos, orig, dest, allowRemote, allowDecompression)
		if err != nil {
			return err
		}
	}

	if len(copyInfos) == 0 {
		return fmt.Errorf("No source files were specified")
	}

	if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one CI then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(copyInfos) == 1 {
		srcHash = copyInfos[0].hash
		origPaths = copyInfos[0].origPath
	} else {
		var hashs []string
		var origs []string
		for _, ci := range copyInfos {
			hashs = append(hashs, ci.hash)
			origs = append(origs, ci.origPath)
		}
		// Combine the individual hashes into a single deterministic key.
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	// Encode the step as a no-op Cmd so probeCache can key on it;
	// restore the caller's Cmd on return.
	cmd := b.Config.Cmd
	b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)}
	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	// If we do not have at least one hash, never use the cache
	if hit && b.UtilizeCache {
		return nil
	}

	container, _, err := b.Daemon.Create(b.Config, nil, "")
	if err != nil {
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	// Perform the actual copies now that all sources were validated.
	for _, ci := range copyInfos {
		if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
			return err
		}
	}

	// Commit with the ORIGINAL Cmd (captured above) so the image config
	// does not keep the synthetic no-op command.
	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
		return err
	}
	return nil
}
|
|
|
|
|
2014-09-22 09:41:02 -04:00
|
|
|
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
|
|
|
|
|
|
|
|
if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
|
|
|
|
origPath = origPath[1:]
|
|
|
|
}
|
|
|
|
origPath = strings.TrimPrefix(origPath, "./")
|
|
|
|
|
2014-12-12 13:32:11 -05:00
|
|
|
// Twiddle the destPath when its a relative path - meaning, make it
|
|
|
|
// relative to the WORKINGDIR
|
|
|
|
if !filepath.IsAbs(destPath) {
|
|
|
|
hasSlash := strings.HasSuffix(destPath, "/")
|
|
|
|
destPath = filepath.Join("/", b.Config.WorkingDir, destPath)
|
|
|
|
|
|
|
|
// Make sure we preserve any trailing slash
|
|
|
|
if hasSlash {
|
|
|
|
destPath += "/"
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-22 09:41:02 -04:00
|
|
|
// In the remote/URL case, download it and gen its hashcode
|
2014-11-24 18:47:42 -05:00
|
|
|
if urlutil.IsURL(origPath) {
|
2014-09-22 09:41:02 -04:00
|
|
|
if !allowRemote {
|
|
|
|
return fmt.Errorf("Source can't be a URL for %s", cmdName)
|
|
|
|
}
|
2014-08-05 16:17:40 -04:00
|
|
|
|
2014-09-22 09:41:02 -04:00
|
|
|
ci := copyInfo{}
|
|
|
|
ci.origPath = origPath
|
|
|
|
ci.hash = origPath // default to this but can change
|
|
|
|
ci.destPath = destPath
|
|
|
|
ci.decompress = false
|
|
|
|
*cInfos = append(*cInfos, &ci)
|
2014-09-16 12:58:20 -04:00
|
|
|
|
2014-08-05 16:17:40 -04:00
|
|
|
// Initiate the download
|
2014-09-16 12:58:20 -04:00
|
|
|
resp, err := utils.Download(ci.origPath)
|
2014-08-05 16:17:40 -04:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a tmp dir
|
|
|
|
tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2014-09-16 12:58:20 -04:00
|
|
|
ci.tmpDir = tmpDirName
|
2014-08-05 16:17:40 -04:00
|
|
|
|
|
|
|
// Create a tmp file within our tmp dir
|
|
|
|
tmpFileName := path.Join(tmpDirName, "tmp")
|
|
|
|
tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Download and dump result to tmp file
|
2014-09-02 20:17:08 -04:00
|
|
|
if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
|
2014-08-05 16:17:40 -04:00
|
|
|
tmpFile.Close()
|
|
|
|
return err
|
|
|
|
}
|
2014-09-02 20:17:08 -04:00
|
|
|
fmt.Fprintf(b.OutStream, "\n")
|
2014-08-05 16:17:40 -04:00
|
|
|
tmpFile.Close()
|
|
|
|
|
2014-10-22 14:16:42 -04:00
|
|
|
// Set the mtime to the Last-Modified header value if present
|
|
|
|
// Otherwise just remove atime and mtime
|
|
|
|
times := make([]syscall.Timespec, 2)
|
|
|
|
|
|
|
|
lastMod := resp.Header.Get("Last-Modified")
|
|
|
|
if lastMod != "" {
|
|
|
|
mTime, err := http.ParseTime(lastMod)
|
|
|
|
// If we can't parse it then just let it default to 'zero'
|
|
|
|
// otherwise use the parsed time value
|
|
|
|
if err == nil {
|
|
|
|
times[1] = syscall.NsecToTimespec(mTime.UnixNano())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := system.UtimesNano(tmpFileName, times); err != nil {
|
2014-08-05 16:17:40 -04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2014-09-16 12:58:20 -04:00
|
|
|
ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
|
2014-08-05 16:17:40 -04:00
|
|
|
|
|
|
|
// If the destination is a directory, figure out the filename.
|
2014-09-16 12:58:20 -04:00
|
|
|
if strings.HasSuffix(ci.destPath, "/") {
|
2014-09-22 09:41:02 -04:00
|
|
|
u, err := url.Parse(origPath)
|
2014-08-05 16:17:40 -04:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
path := u.Path
|
|
|
|
if strings.HasSuffix(path, "/") {
|
|
|
|
path = path[:len(path)-1]
|
|
|
|
}
|
|
|
|
parts := strings.Split(path, "/")
|
|
|
|
filename := parts[len(parts)-1]
|
|
|
|
if filename == "" {
|
|
|
|
return fmt.Errorf("cannot determine filename from url: %s", u)
|
|
|
|
}
|
2014-09-16 12:58:20 -04:00
|
|
|
ci.destPath = ci.destPath + filename
|
2014-08-05 16:17:40 -04:00
|
|
|
}
|
|
|
|
|
2015-01-08 09:56:30 -05:00
|
|
|
// Calc the checksum, even if we're using the cache
|
|
|
|
r, err := archive.Tar(tmpFileName, archive.Uncompressed)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2014-09-22 09:41:02 -04:00
|
|
|
}
|
2015-01-08 09:56:30 -05:00
|
|
|
tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
ci.hash = tarSum.Sum(nil)
|
|
|
|
r.Close()
|
2014-09-22 09:41:02 -04:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Deal with wildcards
|
|
|
|
if ContainsWildcards(origPath) {
|
|
|
|
for _, fileInfo := range b.context.GetSums() {
|
|
|
|
if fileInfo.Name() == "" {
|
|
|
|
continue
|
2014-08-05 16:17:40 -04:00
|
|
|
}
|
2014-09-22 09:41:02 -04:00
|
|
|
match, _ := path.Match(origPath, fileInfo.Name())
|
|
|
|
if !match {
|
|
|
|
continue
|
2014-08-05 16:17:40 -04:00
|
|
|
}
|
2014-09-22 09:41:02 -04:00
|
|
|
|
|
|
|
calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Must be a dir or a file
|
|
|
|
|
|
|
|
if err := b.checkPathForAddition(origPath); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
fi, _ := os.Stat(path.Join(b.contextPath, origPath))
|
|
|
|
|
|
|
|
ci := copyInfo{}
|
|
|
|
ci.origPath = origPath
|
|
|
|
ci.hash = origPath
|
|
|
|
ci.destPath = destPath
|
|
|
|
ci.decompress = allowDecompression
|
|
|
|
*cInfos = append(*cInfos, &ci)
|
|
|
|
|
|
|
|
// Deal with the single file case
|
|
|
|
if !fi.IsDir() {
|
|
|
|
// This will match first file in sums of the archive
|
|
|
|
fis := b.context.GetSums().GetFile(ci.origPath)
|
|
|
|
if fis != nil {
|
|
|
|
ci.hash = "file:" + fis.Sum()
|
2014-08-05 16:17:40 -04:00
|
|
|
}
|
2014-09-22 09:41:02 -04:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Must be a dir
|
|
|
|
var subfiles []string
|
|
|
|
absOrigPath := path.Join(b.contextPath, ci.origPath)
|
2014-08-05 16:17:40 -04:00
|
|
|
|
2014-09-22 09:41:02 -04:00
|
|
|
// Add a trailing / to make sure we only pick up nested files under
|
|
|
|
// the dir and not sibling files of the dir that just happen to
|
|
|
|
// start with the same chars
|
|
|
|
if !strings.HasSuffix(absOrigPath, "/") {
|
|
|
|
absOrigPath += "/"
|
2014-08-05 16:17:40 -04:00
|
|
|
}
|
|
|
|
|
2014-09-22 09:41:02 -04:00
|
|
|
// Need path w/o / too to find matching dir w/o trailing /
|
|
|
|
absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]
|
|
|
|
|
|
|
|
for _, fileInfo := range b.context.GetSums() {
|
|
|
|
absFile := path.Join(b.contextPath, fileInfo.Name())
|
2014-10-23 17:30:11 -04:00
|
|
|
// Any file in the context that starts with the given path will be
|
|
|
|
// picked up and its hashcode used. However, we'll exclude the
|
|
|
|
// root dir itself. We do this for a coupel of reasons:
|
|
|
|
// 1 - ADD/COPY will not copy the dir itself, just its children
|
|
|
|
// so there's no reason to include it in the hash calc
|
|
|
|
// 2 - the metadata on the dir will change when any child file
|
|
|
|
// changes. This will lead to a miss in the cache check if that
|
|
|
|
// child file is in the .dockerignore list.
|
|
|
|
if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
|
2014-09-22 09:41:02 -04:00
|
|
|
subfiles = append(subfiles, fileInfo.Sum())
|
|
|
|
}
|
2014-08-05 16:17:40 -04:00
|
|
|
}
|
2014-09-22 09:41:02 -04:00
|
|
|
sort.Strings(subfiles)
|
|
|
|
hasher := sha256.New()
|
|
|
|
hasher.Write([]byte(strings.Join(subfiles, ",")))
|
|
|
|
ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
|
|
|
|
|
2014-08-05 16:17:40 -04:00
|
|
|
return nil
|
|
|
|
}
|
2014-08-05 18:41:09 -04:00
|
|
|
|
2014-09-22 09:41:02 -04:00
|
|
|
// ContainsWildcards reports whether name contains an unescaped glob
// metacharacter ('*', '?' or '['). A backslash escapes the character
// that follows it, so `\*` is not treated as a wildcard.
func ContainsWildcards(name string) bool {
	for i := 0; i < len(name); i++ {
		switch name[i] {
		case '\\':
			// Skip the escaped character.
			i++
		case '*', '?', '[':
			return true
		}
	}
	return false
}
|
|
|
|
|
2014-08-26 15:25:44 -04:00
|
|
|
// pullImage pulls the named image (defaulting the tag to "latest") via
// the engine's "pull" job and returns the resulting image from the
// daemon's repository store. Registry credentials come from b.AuthConfig
// unless a full auth-config file was supplied with the build request.
func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}
	job := b.Engine.Job("pull", remote, tag)
	pullRegistryAuth := b.AuthConfig
	if len(b.AuthConfigFile.Configs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		repoInfo, err := registry.ResolveRepositoryInfo(job, remote)
		if err != nil {
			return nil, err
		}
		resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(repoInfo.Index)
		pullRegistryAuth = &resolvedAuth
	}
	job.SetenvBool("json", b.StreamFormatter.Json())
	job.SetenvBool("parallel", true)
	job.SetenvJson("authConfig", pullRegistryAuth)
	// Stream pull progress to the build output.
	job.Stdout.Add(b.OutOld)
	if err := job.Run(); err != nil {
		return nil, err
	}
	// Look the image back up by its original (possibly tagged) name.
	image, err := b.Daemon.Repositories().LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}
|
|
|
|
|
2014-08-26 15:25:44 -04:00
|
|
|
// processImageFrom makes img the builder's base image: it adopts the
// image's config (if any), guarantees a PATH environment variable, and
// then replays the image's ONBUILD triggers by parsing each trigger line
// and dispatching it as if it appeared in the current Dockerfile.
// ONBUILD, MAINTAINER and FROM are rejected as trigger instructions.
func (b *Builder) processImageFrom(img *imagepkg.Image) error {
	b.image = img.ID

	if img.Config != nil {
		b.Config = img.Config
	}

	if len(b.Config.Env) == 0 {
		// Ensure a sane default PATH even for images with an empty env.
		b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv)
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
		fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.Config.OnBuild
	b.Config.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for stepN, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			// Disallow instructions that make no sense as triggers.
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step)

			// Execute the trigger as a regular build step.
			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}
|
|
|
|
|
2014-08-26 15:25:44 -04:00
|
|
|
// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`)
|
2014-08-11 11:44:31 -04:00
|
|
|
// and if so attempts to look up the current `b.image` and `b.Config` pair
|
2014-08-26 15:25:44 -04:00
|
|
|
// in the current server `b.Daemon`. If an image is found, probeCache returns
|
2014-08-05 18:41:09 -04:00
|
|
|
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
|
|
|
|
// is any error, it returns `(false, err)`.
|
2014-08-26 15:25:44 -04:00
|
|
|
func (b *Builder) probeCache() (bool, error) {
|
|
|
|
if b.UtilizeCache {
|
|
|
|
if cache, err := b.Daemon.ImageGetCached(b.image, b.Config); err != nil {
|
2014-08-05 18:41:09 -04:00
|
|
|
return false, err
|
|
|
|
} else if cache != nil {
|
2014-08-26 15:25:44 -04:00
|
|
|
fmt.Fprintf(b.OutStream, " ---> Using cache\n")
|
2014-08-13 06:07:41 -04:00
|
|
|
log.Debugf("[BUILDER] Use cached version")
|
2014-08-05 18:41:09 -04:00
|
|
|
b.image = cache.ID
|
|
|
|
return true, nil
|
|
|
|
} else {
|
2014-08-13 06:07:41 -04:00
|
|
|
log.Debugf("[BUILDER] Cache miss")
|
2014-08-05 18:41:09 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
2014-08-26 15:25:44 -04:00
|
|
|
// create makes a new (not yet started) container from the builder's
// current config, registers it as a temporary build container, and sets
// its entry Path/Args from the config's Cmd. Requires a base image
// unless the build explicitly has none (FROM scratch).
func (b *Builder) create() (*daemon.Container, error) {
	if b.image == "" && !b.noBaseImage {
		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	b.Config.Image = b.image

	// Snapshot the config before Create so the Cmd read below reflects
	// what the builder asked for, not anything Create may adjust.
	config := *b.Config

	// Create the container
	c, warnings, err := b.Daemon.Create(b.Config, nil, "")
	if err != nil {
		return nil, err
	}
	for _, warning := range warnings {
		fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning)
	}

	// Track the container so clearTmp can remove it after the build.
	b.TmpContainers[c.ID] = struct{}{}
	fmt.Fprintf(b.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))

	if len(config.Cmd) > 0 {
		// override the entry point that may have been picked up from the base image
		c.Path = config.Cmd[0]
		c.Args = config.Cmd[1:]
	} else {
		// NOTE(review): this assigns to the local copy only; it does not
		// affect b.Config or the created container.
		config.Cmd = []string{}
	}

	return c, nil
}
|
|
|
|
|
2014-08-26 15:25:44 -04:00
|
|
|
// run starts the given build container, optionally streams its logs to
// the build output (when b.Verbose), waits for it to exit, and converts
// a non-zero exit status into a JSONError carrying the exit code.
func (b *Builder) run(c *daemon.Container) error {
	//start the container
	if err := c.Start(); err != nil {
		return err
	}

	if b.Verbose {
		// Follow the container's stdout/stderr into the build streams.
		logsJob := b.Engine.Job("logs", c.ID)
		logsJob.Setenv("follow", "1")
		logsJob.Setenv("stdout", "1")
		logsJob.Setenv("stderr", "1")
		logsJob.Stdout.Add(b.OutStream)
		logsJob.Stderr.Set(b.ErrStream)
		if err := logsJob.Run(); err != nil {
			return err
		}
	}

	// Wait for it to finish; a negative duration means wait forever.
	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
		err := &utils.JSONError{
			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
			Code:    ret,
		}
		return err
	}

	return nil
}
|
|
|
|
|
2014-08-26 15:25:44 -04:00
|
|
|
func (b *Builder) checkPathForAddition(orig string) error {
|
2014-08-05 18:41:09 -04:00
|
|
|
origPath := path.Join(b.contextPath, orig)
|
2014-08-13 06:07:41 -04:00
|
|
|
origPath, err := filepath.EvalSymlinks(origPath)
|
|
|
|
if err != nil {
|
2014-08-05 18:41:09 -04:00
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return fmt.Errorf("%s: no such file or directory", orig)
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if !strings.HasPrefix(origPath, b.contextPath) {
|
|
|
|
return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
|
|
|
|
}
|
2014-08-13 06:07:41 -04:00
|
|
|
if _, err := os.Stat(origPath); err != nil {
|
2014-08-05 18:41:09 -04:00
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return fmt.Errorf("%s: no such file or directory", orig)
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-08-26 15:25:44 -04:00
|
|
|
// addContext copies orig (a path inside the build context) to dest
// inside the container's root filesystem. Directories are copied
// recursively; for files, when decompress is set it first tries to
// untar the source as an archive into the destination, falling back to
// a plain file copy. Ownership of everything created is fixed to
// root:root afterwards.
func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
	var (
		err        error
		destExists = true
		origPath   = path.Join(b.contextPath, orig)
		destPath   = path.Join(container.RootfsPath(), dest)
	)

	// Resolve symlinks in dest, but never past the container's rootfs.
	if destPath != container.RootfsPath() {
		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
		if err != nil {
			return err
		}
	}

	// Preserve the trailing '/'
	if strings.HasSuffix(dest, "/") || dest == "." {
		destPath = destPath + "/"
	}

	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		// Destination doesn't exist yet; remember so permission fixing
		// knows it may chown it.
		destExists = false
	}

	fi, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}

	if fi.IsDir() {
		return copyAsDirectory(origPath, destPath, destExists)
	}

	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
	if decompress {
		// First try to unpack the source as an archive
		// to support the untar feature we need to clean up the path a little bit
		// because tar is very forgiving. First we need to strip off the archive's
		// filename from the path but this is only added if it does not end in / .
		tarDest := destPath
		if strings.HasSuffix(tarDest, "/") {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig
		if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			// Not an archive (or partially so) — fall through to a
			// plain copy below.
			log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
		}
	}

	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
		return err
	}

	// If dest was an existing directory, the file landed inside it.
	resPath := destPath
	if destExists && destStat.IsDir() {
		resPath = path.Join(destPath, path.Base(origPath))
	}

	return fixPermissions(origPath, resPath, 0, 0, destExists)
}
|
|
|
|
|
2014-12-11 13:40:16 -05:00
|
|
|
// copyAsDirectory recursively copies the source directory into
// destination (via a tar round-trip confined to the destination) and
// then normalizes ownership of everything copied to root:root.
// destExisted tells fixPermissions whether the destination predated the
// copy, so a pre-existing directory is not chowned.
func copyAsDirectory(source, destination string, destExisted bool) error {
	if err := chrootarchive.CopyWithTar(source, destination); err != nil {
		return err
	}
	return fixPermissions(source, destination, 0, 0, destExisted)
}
|
|
|
|
|
2014-12-11 13:40:16 -05:00
|
|
|
// fixPermissions chowns everything that was just copied from source to
// destination to uid:gid. Only the entries that exist under source are
// touched — the walk runs over source and each path is remapped onto
// destination — so pre-existing destination content is left alone. The
// destination root itself is chowned only when it was newly created or
// is not a directory.
func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
	rootInfo, err := os.Stat(destination)
	if err != nil {
		// Should be unreachable: the destination was created while the
		// context was untarred.
		return err
	}

	// A destination directory that existed before the copy keeps its
	// original ownership; anything else gets chowned too.
	chownRoot := !destExisted || !rootInfo.IsDir()

	// We Walk on the source rather than on the destination because we don't
	// want to change permissions on things we haven't created or modified.
	return filepath.Walk(source, func(srcPath string, fi os.FileInfo, walkErr error) error {
		if srcPath == source && !chownRoot {
			// The walk root predates this copy; leave it untouched.
			return nil
		}

		// Remap the source-relative path onto the destination tree.
		rel, relErr := filepath.Rel(source, srcPath)
		if relErr != nil {
			return relErr
		}
		return os.Lchown(path.Join(destination, rel), uid, gid)
	})
}
|
|
|
|
|
2014-08-26 15:25:44 -04:00
|
|
|
// clearTmp destroys every temporary container created during the build
// (tracked in b.TmpContainers), removing its volumes as well. On the
// first destroy failure it reports the error to the build output and
// stops, leaving the remaining entries in b.TmpContainers.
func (b *Builder) clearTmp() {
	for c := range b.TmpContainers {
		tmp := b.Daemon.Get(c)
		if err := b.Daemon.Destroy(tmp); err != nil {
			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
			return
		}
		b.Daemon.DeleteVolumes(tmp.VolumePaths())
		delete(b.TmpContainers, c)
		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
	}
}
|