2014-03-11 13:40:06 -04:00
|
|
|
package server
|
2013-05-23 21:33:31 -04:00
|
|
|
|
|
|
|
import (
|
2013-12-26 19:42:05 -05:00
|
|
|
"crypto/sha256"
|
|
|
|
"encoding/hex"
|
2013-05-23 21:33:31 -04:00
|
|
|
"encoding/json"
|
2013-12-20 07:26:11 -05:00
|
|
|
"errors"
|
2013-05-23 21:33:31 -04:00
|
|
|
"fmt"
|
2013-10-31 19:57:45 -04:00
|
|
|
"github.com/dotcloud/docker/archive"
|
2014-03-13 16:58:09 -04:00
|
|
|
"github.com/dotcloud/docker/nat"
|
2014-01-03 15:13:32 -05:00
|
|
|
"github.com/dotcloud/docker/registry"
|
2014-02-11 23:04:39 -05:00
|
|
|
"github.com/dotcloud/docker/runconfig"
|
2014-03-07 21:42:29 -05:00
|
|
|
"github.com/dotcloud/docker/runtime"
|
2013-05-23 21:33:31 -04:00
|
|
|
"github.com/dotcloud/docker/utils"
|
|
|
|
"io"
|
2013-05-28 16:37:49 -04:00
|
|
|
"io/ioutil"
|
2013-07-08 03:43:22 -04:00
|
|
|
"net/url"
|
2013-05-23 21:33:31 -04:00
|
|
|
"os"
|
2013-05-28 16:37:49 -04:00
|
|
|
"path"
|
2013-12-12 00:33:15 -05:00
|
|
|
"path/filepath"
|
2013-05-23 21:33:31 -04:00
|
|
|
"reflect"
|
2013-07-09 03:01:45 -04:00
|
|
|
"regexp"
|
2013-12-26 19:02:15 -05:00
|
|
|
"sort"
|
2013-05-23 21:33:31 -04:00
|
|
|
"strings"
|
|
|
|
)
|
|
|
|
|
2013-12-20 07:26:11 -05:00
|
|
|
var (
|
|
|
|
ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
|
|
|
|
)
|
|
|
|
|
2013-05-23 21:33:31 -04:00
|
|
|
// BuildFile abstracts a Dockerfile-driven build: Build consumes a tar'd
// build context and returns the ID of the resulting image.
type BuildFile interface {
	// Build runs every instruction in the context's Dockerfile and
	// returns the final image ID.
	Build(io.Reader) (string, error)
	// CmdFrom sets the base image for subsequent steps.
	CmdFrom(string) error
	// CmdRun executes a command in a container and commits the result.
	CmdRun(string) error
}
|
|
|
|
|
|
|
|
// buildFile is the concrete BuildFile implementation. It carries the
// mutable state of an in-progress build: the current image/config pair,
// the unpacked build context, and bookkeeping for cleanup.
type buildFile struct {
	runtime *runtime.Runtime
	srv     *Server

	image      string // ID of the image produced by the last committed step
	maintainer string
	config     *runconfig.Config // config being accumulated for the next commit

	contextPath string        // temp directory where the build context was unpacked
	context     *utils.TarSum // checksumming reader over the original context stream

	verbose      bool // stream container output during RUN steps
	utilizeCache bool // reuse cached layers when image/config pairs match
	rm           bool // remove intermediate containers after a successful build

	authConfig *registry.AuthConfig
	configFile *registry.ConfigFile

	// Intermediate containers/images created so far, kept for cleanup.
	tmpContainers map[string]struct{}
	tmpImages     map[string]struct{}

	outStream io.Writer
	errStream io.Writer

	// Deprecated, original writer used for ImagePull. To be removed.
	outOld io.Writer
	sf     *utils.StreamFormatter
}
|
|
|
|
|
2013-09-10 14:39:47 -04:00
|
|
|
func (b *buildFile) clearTmp(containers map[string]struct{}) {
|
2013-05-23 21:33:31 -04:00
|
|
|
for c := range containers {
|
|
|
|
tmp := b.runtime.Get(c)
|
2014-02-12 18:23:42 -05:00
|
|
|
if err := b.runtime.Destroy(tmp); err != nil {
|
|
|
|
fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
|
|
|
|
} else {
|
|
|
|
fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c))
|
|
|
|
}
|
2013-05-23 21:33:31 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// CmdFrom sets the base image for the build. If the image is not present
// locally it is pulled via the "pull" engine job, preferring the client's
// full auth-config file (resolved per registry endpoint) over the single
// AuthConfig when one was supplied. The image's config then seeds
// b.config, and any ONBUILD triggers stored on it are replayed as build
// steps before the triggers are cleared.
func (b *buildFile) CmdFrom(name string) error {
	image, err := b.runtime.Repositories().LookupImage(name)
	if err != nil {
		if b.runtime.Graph().IsNotExist(err) {
			remote, tag := utils.ParseRepositoryTag(name)
			pullRegistryAuth := b.authConfig
			if len(b.configFile.Configs) > 0 {
				// The request came with a full auth config file, we prefer to use that
				endpoint, _, err := registry.ResolveRepositoryName(remote)
				if err != nil {
					return err
				}
				resolvedAuth := b.configFile.ResolveAuthConfig(endpoint)
				pullRegistryAuth = &resolvedAuth
			}
			job := b.srv.Eng.Job("pull", remote, tag)
			job.SetenvBool("json", b.sf.Json())
			job.SetenvBool("parallel", true)
			job.SetenvJson("authConfig", pullRegistryAuth)
			// Pull progress goes to the deprecated legacy writer.
			job.Stdout.Add(b.outOld)
			if err := job.Run(); err != nil {
				return err
			}
			image, err = b.runtime.Repositories().LookupImage(name)
			if err != nil {
				return err
			}
		} else {
			return err
		}
	}
	b.image = image.ID
	b.config = &runconfig.Config{}
	if image.Config != nil {
		b.config = image.Config
	}
	// Give the build a sane default environment when the base image has none.
	if b.config.Env == nil || len(b.config.Env) == 0 {
		b.config.Env = append(b.config.Env, "HOME=/", "PATH="+runtime.DefaultPathEnv)
	}
	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
		fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers)
	}
	for n, step := range b.config.OnBuild {
		splitStep := strings.Split(step, " ")
		stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " "))
		// Forbid triggers that would loop or redefine the build's identity.
		switch stepInstruction {
		case "ONBUILD":
			return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step)
		case "MAINTAINER", "FROM":
			return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step)
		}
		if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil {
			return err
		}
	}
	// Triggers have been consumed; do not carry them into the new image.
	b.config.OnBuild = []string{}
	return nil
}
|
|
|
|
|
2013-12-04 05:36:21 -05:00
|
|
|
// The ONBUILD command declares a build instruction to be executed in any future build
|
|
|
|
// using the current image as a base.
|
|
|
|
func (b *buildFile) CmdOnbuild(trigger string) error {
|
2014-02-20 10:16:45 -05:00
|
|
|
splitTrigger := strings.Split(trigger, " ")
|
|
|
|
triggerInstruction := strings.ToUpper(strings.Trim(splitTrigger[0], " "))
|
|
|
|
switch triggerInstruction {
|
|
|
|
case "ONBUILD":
|
|
|
|
return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
|
|
|
|
case "MAINTAINER", "FROM":
|
|
|
|
return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
|
|
|
|
}
|
2013-12-04 05:36:21 -05:00
|
|
|
b.config.OnBuild = append(b.config.OnBuild, trigger)
|
|
|
|
return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
|
|
|
|
}
|
|
|
|
|
2013-05-23 21:33:31 -04:00
|
|
|
func (b *buildFile) CmdMaintainer(name string) error {
|
|
|
|
b.maintainer = name
|
2013-05-29 18:03:00 -04:00
|
|
|
return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name))
|
2013-05-23 21:33:31 -04:00
|
|
|
}
|
|
|
|
|
2013-12-12 00:33:15 -05:00
|
|
|
// probeCache checks to see if image-caching is enabled (`b.utilizeCache`)
|
|
|
|
// and if so attempts to look up the current `b.image` and `b.config` pair
|
|
|
|
// in the current server `b.srv`. If an image is found, probeCache returns
|
|
|
|
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
|
|
|
|
// is any error, it returns `(false, err)`.
|
|
|
|
func (b *buildFile) probeCache() (bool, error) {
|
|
|
|
if b.utilizeCache {
|
|
|
|
if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
|
|
|
|
return false, err
|
|
|
|
} else if cache != nil {
|
|
|
|
fmt.Fprintf(b.outStream, " ---> Using cache\n")
|
|
|
|
utils.Debugf("[BUILDER] Use cached version")
|
|
|
|
b.image = cache.ID
|
|
|
|
return true, nil
|
|
|
|
} else {
|
|
|
|
utils.Debugf("[BUILDER] Cache miss")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false, nil
|
|
|
|
}
|
2013-12-12 00:33:15 -05:00
|
|
|
|
2013-05-23 21:33:31 -04:00
|
|
|
// CmdRun executes a command in a fresh container built from the current
// image and commits the resulting filesystem as the next build layer.
// The command is merged into b.config only for the duration of the step;
// a cache hit for the resulting (image, config) pair skips execution.
func (b *buildFile) CmdRun(args string) error {
	if b.image == "" {
		return fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil)
	if err != nil {
		return err
	}

	// Save the persistent Cmd so the RUN command can be merged in
	// temporarily; it is restored by the deferred closure below.
	cmd := b.config.Cmd
	b.config.Cmd = nil
	runconfig.Merge(b.config, config)

	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)

	utils.Debugf("Command to be executed: %v", b.config.Cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		return nil
	}

	c, err := b.create()
	if err != nil {
		return err
	}
	// Ensure that we keep the container mounted until the commit
	// to avoid unmounting and then mounting directly again
	// NOTE(review): the Mount error is discarded here — confirm that a
	// failed mount is recoverable by the later run/commit calls.
	c.Mount()
	defer c.Unmount()

	err = b.run(c)
	if err != nil {
		return err
	}
	// Commit with the saved (pre-RUN) Cmd so the image keeps its
	// original default command.
	if err := b.commit(c.ID, cmd, "run"); err != nil {
		return err
	}

	return nil
}
|
|
|
|
|
2013-07-09 03:01:45 -04:00
|
|
|
func (b *buildFile) FindEnvKey(key string) int {
|
|
|
|
for k, envVar := range b.config.Env {
|
|
|
|
envParts := strings.SplitN(envVar, "=", 2)
|
|
|
|
if key == envParts[0] {
|
|
|
|
return k
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return -1
|
|
|
|
}
|
|
|
|
|
|
|
|
func (b *buildFile) ReplaceEnvMatches(value string) (string, error) {
|
|
|
|
exp, err := regexp.Compile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)")
|
|
|
|
if err != nil {
|
|
|
|
return value, err
|
|
|
|
}
|
|
|
|
matches := exp.FindAllString(value, -1)
|
|
|
|
for _, match := range matches {
|
|
|
|
match = match[strings.Index(match, "$"):]
|
|
|
|
matchKey := strings.Trim(match, "${}")
|
|
|
|
|
|
|
|
for _, envVar := range b.config.Env {
|
|
|
|
envParts := strings.SplitN(envVar, "=", 2)
|
|
|
|
envKey := envParts[0]
|
|
|
|
envValue := envParts[1]
|
|
|
|
|
|
|
|
if envKey == matchKey {
|
|
|
|
value = strings.Replace(value, match, envValue, -1)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return value, nil
|
|
|
|
}
|
|
|
|
|
2013-05-23 21:33:31 -04:00
|
|
|
func (b *buildFile) CmdEnv(args string) error {
|
|
|
|
tmp := strings.SplitN(args, " ", 2)
|
|
|
|
if len(tmp) != 2 {
|
|
|
|
return fmt.Errorf("Invalid ENV format")
|
|
|
|
}
|
2013-06-10 12:31:59 -04:00
|
|
|
key := strings.Trim(tmp[0], " \t")
|
|
|
|
value := strings.Trim(tmp[1], " \t")
|
2013-05-23 21:33:31 -04:00
|
|
|
|
2013-07-09 03:01:45 -04:00
|
|
|
envKey := b.FindEnvKey(key)
|
|
|
|
replacedValue, err := b.ReplaceEnvMatches(value)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2013-05-23 21:33:31 -04:00
|
|
|
}
|
2013-07-09 03:01:45 -04:00
|
|
|
replacedVar := fmt.Sprintf("%s=%s", key, replacedValue)
|
|
|
|
|
|
|
|
if envKey >= 0 {
|
|
|
|
b.config.Env[envKey] = replacedVar
|
2013-08-07 20:23:49 -04:00
|
|
|
} else {
|
|
|
|
b.config.Env = append(b.config.Env, replacedVar)
|
2013-07-09 03:01:45 -04:00
|
|
|
}
|
|
|
|
return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar))
|
2013-05-23 21:33:31 -04:00
|
|
|
}
|
|
|
|
|
2013-12-27 21:06:26 -05:00
|
|
|
func (b *buildFile) buildCmdFromJson(args string) []string {
|
2013-05-23 21:33:31 -04:00
|
|
|
var cmd []string
|
|
|
|
if err := json.Unmarshal([]byte(args), &cmd); err != nil {
|
2013-12-27 21:06:26 -05:00
|
|
|
utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
|
2013-05-29 18:03:00 -04:00
|
|
|
cmd = []string{"/bin/sh", "-c", args}
|
2013-05-23 21:33:31 -04:00
|
|
|
}
|
2013-12-27 21:06:26 -05:00
|
|
|
return cmd
|
|
|
|
}
|
|
|
|
|
|
|
|
func (b *buildFile) CmdCmd(args string) error {
|
|
|
|
cmd := b.buildCmdFromJson(args)
|
|
|
|
b.config.Cmd = cmd
|
|
|
|
if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (b *buildFile) CmdEntrypoint(args string) error {
|
|
|
|
entrypoint := b.buildCmdFromJson(args)
|
|
|
|
b.config.Entrypoint = entrypoint
|
|
|
|
if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
|
2013-05-30 15:21:57 -04:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
2013-05-23 21:33:31 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
func (b *buildFile) CmdExpose(args string) error {
|
2014-03-13 16:58:09 -04:00
|
|
|
portsTab := strings.Split(args, " ")
|
|
|
|
|
|
|
|
if b.config.ExposedPorts == nil {
|
|
|
|
b.config.ExposedPorts = make(nat.PortSet)
|
|
|
|
}
|
|
|
|
ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
for port := range ports {
|
|
|
|
if _, exists := b.config.ExposedPorts[port]; !exists {
|
|
|
|
b.config.ExposedPorts[port] = struct{}{}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
b.config.PortSpecs = nil
|
|
|
|
|
2013-05-29 18:03:00 -04:00
|
|
|
return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
|
2013-05-23 21:33:31 -04:00
|
|
|
}
|
|
|
|
|
2013-08-13 08:02:17 -04:00
|
|
|
func (b *buildFile) CmdUser(args string) error {
|
|
|
|
b.config.User = args
|
|
|
|
return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args))
|
|
|
|
}
|
|
|
|
|
2013-05-30 15:10:54 -04:00
|
|
|
// CmdInsert rejects the retired INSERT instruction with an error
// directing users to ADD.
func (b *buildFile) CmdInsert(args string) error {
	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
}
|
|
|
|
|
|
|
|
// CmdCopy rejects the retired COPY instruction with an error directing
// users to ADD.
func (b *buildFile) CmdCopy(args string) error {
	return fmt.Errorf("COPY has been deprecated. Please use ADD instead")
}
|
|
|
|
|
2013-08-18 14:30:19 -04:00
|
|
|
func (b *buildFile) CmdWorkdir(workdir string) error {
|
2014-03-15 16:00:35 -04:00
|
|
|
if workdir[0] == '/' {
|
|
|
|
b.config.WorkingDir = workdir
|
|
|
|
} else {
|
|
|
|
if b.config.WorkingDir == "" {
|
|
|
|
b.config.WorkingDir = "/"
|
|
|
|
}
|
|
|
|
b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir)
|
|
|
|
}
|
2013-08-18 14:30:19 -04:00
|
|
|
return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
|
|
|
|
}
|
|
|
|
|
2013-07-03 22:33:30 -04:00
|
|
|
func (b *buildFile) CmdVolume(args string) error {
|
|
|
|
if args == "" {
|
|
|
|
return fmt.Errorf("Volume cannot be empty")
|
|
|
|
}
|
|
|
|
|
|
|
|
var volume []string
|
|
|
|
if err := json.Unmarshal([]byte(args), &volume); err != nil {
|
|
|
|
volume = []string{args}
|
|
|
|
}
|
|
|
|
if b.config.Volumes == nil {
|
2013-11-26 12:46:06 -05:00
|
|
|
b.config.Volumes = map[string]struct{}{}
|
2013-07-03 22:33:30 -04:00
|
|
|
}
|
|
|
|
for _, v := range volume {
|
|
|
|
b.config.Volumes[v] = struct{}{}
|
|
|
|
}
|
|
|
|
if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2013-12-12 00:33:15 -05:00
|
|
|
// checkPathForAddition verifies that orig (a path relative to the build
// context) resolves — after following symlinks — to a location inside
// the context, and that it exists. It rejects ADD sources that would
// escape the build context.
func (b *buildFile) checkPathForAddition(orig string) error {
	origPath := path.Join(b.contextPath, orig)
	if p, err := filepath.EvalSymlinks(origPath); err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	} else {
		origPath = p
	}
	// NOTE(review): a plain string-prefix check also matches sibling paths
	// that merely share the context path as a prefix (e.g. "/ctx-evil" vs
	// "/ctx") — confirm b.contextPath is always a unique temp directory.
	if !strings.HasPrefix(origPath, b.contextPath) {
		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
	}
	_, err := os.Stat(origPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s: no such file or directory", orig)
		}
		return err
	}
	return nil
}
|
2013-05-28 16:38:26 -04:00
|
|
|
|
2014-03-07 21:42:29 -05:00
|
|
|
func (b *buildFile) addContext(container *runtime.Container, orig, dest string, remote bool) error {
|
2013-12-26 19:02:15 -05:00
|
|
|
var (
|
2014-03-26 09:55:45 -04:00
|
|
|
err error
|
2013-12-26 19:02:15 -05:00
|
|
|
origPath = path.Join(b.contextPath, orig)
|
2014-03-04 04:16:09 -05:00
|
|
|
destPath = path.Join(container.RootfsPath(), dest)
|
2013-12-26 19:02:15 -05:00
|
|
|
)
|
2014-03-26 09:55:45 -04:00
|
|
|
|
|
|
|
if destPath != container.RootfsPath() {
|
|
|
|
destPath, err = utils.FollowSymlinkInScope(destPath, container.RootfsPath())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-06-18 23:28:49 -04:00
|
|
|
// Preserve the trailing '/'
|
2013-07-08 03:43:22 -04:00
|
|
|
if strings.HasSuffix(dest, "/") {
|
2013-06-18 23:28:49 -04:00
|
|
|
destPath = destPath + "/"
|
|
|
|
}
|
2013-05-28 18:22:01 -04:00
|
|
|
fi, err := os.Stat(origPath)
|
|
|
|
if err != nil {
|
2014-01-14 13:42:03 -05:00
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return fmt.Errorf("%s: no such file or directory", orig)
|
|
|
|
}
|
|
|
|
return err
|
2013-05-28 16:38:26 -04:00
|
|
|
}
|
2014-02-13 20:18:38 -05:00
|
|
|
|
2013-05-28 18:22:01 -04:00
|
|
|
if fi.IsDir() {
|
2013-10-31 19:57:45 -04:00
|
|
|
if err := archive.CopyWithTar(origPath, destPath); err != nil {
|
2013-05-28 18:22:01 -04:00
|
|
|
return err
|
|
|
|
}
|
2014-02-13 20:18:38 -05:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// First try to unpack the source as an archive
|
|
|
|
// to support the untar feature we need to clean up the path a little bit
|
|
|
|
// because tar is very forgiving. First we need to strip off the archive's
|
|
|
|
// filename from the path but this is only added if it does not end in / .
|
|
|
|
tarDest := destPath
|
|
|
|
if strings.HasSuffix(tarDest, "/") {
|
|
|
|
tarDest = filepath.Dir(destPath)
|
|
|
|
}
|
|
|
|
|
2014-02-17 20:08:17 -05:00
|
|
|
// If we are adding a remote file, do not try to untar it
|
|
|
|
if !remote {
|
|
|
|
// try to successfully untar the orig
|
|
|
|
if err := archive.UntarPath(origPath, tarDest); err == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
utils.Debugf("Couldn't untar %s to %s: %s", origPath, destPath, err)
|
2014-02-13 20:18:38 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// If that fails, just copy it as a regular file
|
|
|
|
// but do not use all the magic path handling for the tar path
|
|
|
|
if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := archive.CopyWithTar(origPath, destPath); err != nil {
|
|
|
|
return err
|
2013-05-28 18:22:01 -04:00
|
|
|
}
|
2013-06-06 18:40:46 -04:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// CmdAdd copies a file, directory, archive or remote URL into the image
// filesystem. Remote sources are downloaded into the build context
// first; sources are hashed (via the context TarSum) so unchanged
// additions can hit the build cache.
func (b *buildFile) CmdAdd(args string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use ADD")
	}
	tmp := strings.SplitN(args, " ", 2)
	if len(tmp) != 2 {
		return fmt.Errorf("Invalid ADD format")
	}

	// Expand environment references in both source and destination.
	orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t"))
	if err != nil {
		return err
	}

	dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t"))
	if err != nil {
		return err
	}

	// Temporarily replace Cmd with a no-op marker describing the ADD;
	// the original is restored at the end of the function.
	cmd := b.config.Cmd
	b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", orig, dest)}
	b.config.Image = b.image

	var (
		origPath   = orig
		destPath   = dest
		remoteHash string
		isRemote   bool
	)

	if utils.IsURL(orig) {
		isRemote = true
		// Download the remote file into a temp dir inside the context so
		// the normal context-containment checks still apply.
		resp, err := utils.Download(orig)
		if err != nil {
			return err
		}
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}
		defer os.RemoveAll(tmpDirName)
		if _, err = io.Copy(tmpFile, resp.Body); err != nil {
			tmpFile.Close()
			return err
		}
		origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
		tmpFile.Close()

		// Process the checksum
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum := utils.TarSum{Reader: r, DisableCompression: true}
		remoteHash = tarSum.Sum(nil)
		r.Close()

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(dest, "/") {
			u, err := url.Parse(orig)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			destPath = dest + filename
		}
	}

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}

	// Hash path and check the cache
	if b.utilizeCache {
		var (
			hash string
			sums = b.context.GetSums()
		)

		if remoteHash != "" {
			hash = remoteHash
		} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
			return err
		} else if fi.IsDir() {
			// Directories are hashed by combining the TarSum of every
			// context file under them, in sorted order.
			var subfiles []string
			for file, sum := range sums {
				absFile := path.Join(b.contextPath, file)
				absOrigPath := path.Join(b.contextPath, origPath)
				if strings.HasPrefix(absFile, absOrigPath) {
					subfiles = append(subfiles, sum)
				}
			}
			sort.Strings(subfiles)
			hasher := sha256.New()
			hasher.Write([]byte(strings.Join(subfiles, ",")))
			hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
		} else {
			// Normalize to the relative form used as TarSum keys.
			if origPath[0] == '/' && len(origPath) > 1 {
				origPath = origPath[1:]
			}
			origPath = strings.TrimPrefix(origPath, "./")
			if h, ok := sums[origPath]; ok {
				hash = "file:" + h
			}
		}
		b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", hash, dest)}
		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		// If we do not have a hash, never use the cache
		if hit && hash != "" {
			return nil
		}
	}

	// Create the container and start it
	container, _, err := b.runtime.Create(b.config, "")
	if err != nil {
		return err
	}
	b.tmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	if err := b.addContext(container, origPath, destPath, isRemote); err != nil {
		return err
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("ADD %s in %s", orig, dest)); err != nil {
		return err
	}
	// Restore the original default command.
	b.config.Cmd = cmd
	return nil
}
|
|
|
|
|
2014-03-07 21:42:29 -05:00
|
|
|
func (b *buildFile) create() (*runtime.Container, error) {
|
2013-05-23 21:33:31 -04:00
|
|
|
if b.image == "" {
|
2014-02-12 10:02:53 -05:00
|
|
|
return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
|
2013-05-23 21:33:31 -04:00
|
|
|
}
|
|
|
|
b.config.Image = b.image
|
|
|
|
|
|
|
|
// Create the container and start it
|
2013-10-28 19:58:59 -04:00
|
|
|
c, _, err := b.runtime.Create(b.config, "")
|
2013-05-23 21:33:31 -04:00
|
|
|
if err != nil {
|
2014-02-12 10:02:53 -05:00
|
|
|
return nil, err
|
2013-05-23 21:33:31 -04:00
|
|
|
}
|
2013-06-04 14:00:22 -04:00
|
|
|
b.tmpContainers[c.ID] = struct{}{}
|
2013-12-02 14:43:41 -05:00
|
|
|
fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
|
|
|
|
|
2013-07-08 03:11:45 -04:00
|
|
|
// override the entry point that may have been picked up from the base image
|
|
|
|
c.Path = b.config.Cmd[0]
|
|
|
|
c.Args = b.config.Cmd[1:]
|
|
|
|
|
2014-02-12 10:02:53 -05:00
|
|
|
return c, nil
|
|
|
|
}
|
|
|
|
|
2014-03-07 21:42:29 -05:00
|
|
|
// run starts container c, optionally streaming its output when verbose
// mode is on, and waits for it to exit. A non-zero exit status is
// reported as a utils.JSONError carrying the code.
func (b *buildFile) run(c *runtime.Container) error {
	var errCh chan error

	if b.verbose {
		// Attach stdout/stderr in the background; the channel yields the
		// attach result once the streams close.
		errCh = utils.Go(func() error {
			return <-c.Attach(nil, nil, b.outStream, b.errStream)
		})
	}

	//start the container
	if err := c.Start(); err != nil {
		return err
	}

	if errCh != nil {
		// Drain the attach streams before waiting on the exit status.
		if err := <-errCh; err != nil {
			return err
		}
	}

	// Wait for it to finish
	if ret := c.Wait(); ret != 0 {
		err := &utils.JSONError{
			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
			Code:    ret,
		}
		return err
	}

	return nil
}
|
|
|
|
|
2013-05-29 17:37:03 -04:00
|
|
|
// Commit the container <id> with the autorun command <autoCmd>.
// When id is empty, a throwaway no-op container is created (subject to
// cache probing) so the comment becomes its own cacheable layer; the
// committed image's Cmd is set to autoCmd and b.image advances to the
// new image ID.
func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
	if b.image == "" {
		return fmt.Errorf("Please provide a source image with `from` prior to commit")
	}
	b.config.Image = b.image
	if id == "" {
		// Temporarily swap in a no-op marker command; restored on return.
		cmd := b.config.Cmd
		b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
		defer func(cmd []string) { b.config.Cmd = cmd }(cmd)

		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		if hit {
			return nil
		}

		container, warnings, err := b.runtime.Create(b.config, "")
		if err != nil {
			return err
		}
		for _, warning := range warnings {
			fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning)
		}
		b.tmpContainers[container.ID] = struct{}{}
		fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
		id = container.ID

		// Keep the container mounted across the commit below.
		if err := container.Mount(); err != nil {
			return err
		}
		defer container.Unmount()
	}
	container := b.runtime.Get(id)
	if container == nil {
		return fmt.Errorf("An error occured while creating the container")
	}

	// Note: Actually copy the struct
	autoConfig := *b.config
	autoConfig.Cmd = autoCmd
	// Commit the container
	image, err := b.runtime.Commit(container, "", "", "", b.maintainer, &autoConfig)
	if err != nil {
		return err
	}
	b.tmpImages[image.ID] = struct{}{}
	b.image = image.ID
	return nil
}
|
|
|
|
|
2013-09-09 20:02:45 -04:00
|
|
|
// Long lines can be split with a backslash: lineContinuation matches a
// trailing backslash (plus surrounding whitespace) and the newline so
// continued lines can be joined before parsing.
var lineContinuation = regexp.MustCompile(`\s*\\\s*\n`)
|
2013-09-09 19:42:04 -04:00
|
|
|
|
2013-06-15 12:38:18 -04:00
|
|
|
// Build unpacks the tar'd build context into a temp directory, reads its
// Dockerfile, and executes each instruction as a numbered build step.
// It returns the ID of the final image on success.
func (b *buildFile) Build(context io.Reader) (string, error) {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		return "", err
	}

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return "", err
	}

	// Wrap the context in a TarSum so per-file checksums are available
	// later for ADD cache probing.
	b.context = &utils.TarSum{Reader: decompressedStream, DisableCompression: true}
	if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
		return "", err
	}
	defer os.RemoveAll(tmpdirPath)

	b.contextPath = tmpdirPath
	filename := path.Join(tmpdirPath, "Dockerfile")
	if _, err := os.Stat(filename); os.IsNotExist(err) {
		return "", fmt.Errorf("Can't build a directory with no Dockerfile")
	}
	fileBytes, err := ioutil.ReadFile(filename)
	if err != nil {
		return "", err
	}
	if len(fileBytes) == 0 {
		return "", ErrDockerfileEmpty
	}
	var (
		// Strip comments first, then join backslash-continued lines.
		dockerfile = lineContinuation.ReplaceAllString(stripComments(fileBytes), "")
		stepN      = 0
	)
	for _, line := range strings.Split(dockerfile, "\n") {
		line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n")
		if len(line) == 0 {
			continue
		}
		if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil {
			return "", err
		}
		stepN += 1
	}
	if b.image != "" {
		fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image))
		if b.rm {
			b.clearTmp(b.tmpContainers)
		}
		return b.image, nil
	}
	return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n")
}
|
|
|
|
|
2013-12-04 05:35:23 -05:00
|
|
|
// BuildStep parses a single build step from `instruction` and executes it in the current context.
func (b *buildFile) BuildStep(name, expression string) error {
	fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression)
	tmp := strings.SplitN(expression, " ", 2)
	if len(tmp) != 2 {
		return fmt.Errorf("Invalid Dockerfile format")
	}
	instruction := strings.ToLower(strings.Trim(tmp[0], " "))
	arguments := strings.Trim(tmp[1], " ")

	// Instructions dispatch via reflection to the Cmd<Titlecase> method,
	// e.g. "run" -> CmdRun. Unknown instructions are skipped with a warning.
	method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
	if !exists {
		fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
		return nil
	}

	// Each Cmd* method returns a single error value.
	ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
	if ret != nil {
		return ret.(error)
	}

	fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image))
	return nil
}
|
|
|
|
|
2014-03-18 20:50:40 -04:00
|
|
|
// stripComments removes comment lines (those whose first character is
// '#') and empty lines from a raw Dockerfile, returning the remaining
// lines joined by newlines.
func stripComments(raw []byte) string {
	var kept []string
	for _, line := range strings.Split(string(raw), "\n") {
		if len(line) == 0 || strings.HasPrefix(line, "#") {
			continue
		}
		kept = append(kept, line)
	}
	return strings.Join(kept, "\n")
}
|
|
|
|
|
2014-03-10 20:16:58 -04:00
|
|
|
// NewBuildFile constructs a BuildFile bound to the given server and
// output streams. verbose echoes container output during RUN steps,
// utilizeCache enables layer caching, and rm removes intermediate
// containers after a successful build. outOld is the deprecated writer
// used for pull progress.
func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile {
	return &buildFile{
		runtime:       srv.runtime,
		srv:           srv,
		config:        &runconfig.Config{},
		outStream:     outStream,
		errStream:     errStream,
		tmpContainers: make(map[string]struct{}),
		tmpImages:     make(map[string]struct{}),
		verbose:       verbose,
		utilizeCache:  utilizeCache,
		rm:            rm,
		sf:            sf,
		authConfig:    auth,
		configFile:    authConfigFile,
		outOld:        outOld,
	}
}
|