package dockerfile // import "github.com/docker/docker/builder/dockerfile"

import (
	"archive/tar"
	"fmt"
	"io"
	"mime"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"time"

	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/remotecontext"
	"github.com/docker/docker/builder/remotecontext/urlutil"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/containerfs"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/system"
	"github.com/moby/buildkit/frontend/dockerfile/instructions"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

const unnamedFilename = "__unnamed__"
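// pathCache is the Load/Store subset of a sync.Map-style cache used to store
// the hash of already-resolved paths, keyed by image ID plus source path.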
type pathCache interface {
	Load(key interface{}) (value interface{}, ok bool)
	Store(key, value interface{})
}

// copyInfo is a data object which stores the metadata about each source file in
// a copyInstruction
type copyInfo struct {
	root         containerfs.ContainerFS
	path         string
	hash         string
	noDecompress bool
}

func (c copyInfo) fullPath() (string, error) {
	return c.root.ResolveScopedPath(c.path, true)
}

func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo {
	return copyInfo{root: source.Root(), path: path, hash: hash}
}

func newCopyInfos(copyInfos ...copyInfo) []copyInfo {
	return copyInfos
}
// copyInstruction is a fully parsed COPY or ADD command that is passed to
// Builder.performCopy to copy files into the image filesystem
type copyInstruction struct {
	cmdName                 string
	infos                   []copyInfo
	dest                    string
	chownStr                string
	allowLocalDecompression bool
	preserveOwnership       bool
}

// copier reads a raw COPY or ADD command, fetches remote sources using a downloader,
// and creates a copyInstruction
type copier struct {
	imageSource *imageMount
	source      builder.Source
	pathCache   pathCache
	download    sourceDownloader
	platform    *specs.Platform
	// for cleanup. TODO: having copier.cleanup() is error prone and hard to
	// follow. Code calling performCopy should manage the lifecycle of its params.
	// Copier should take override source as input, not imageMount.
	activeLayer builder.RWLayer
	tmpPaths    []string
}
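// copierFromDispatchRequest builds a copier for a single COPY or ADD dispatch,
// resolving the target platform from the builder, then the dispatch state, and
// finally the daemon's own OS as a fallback.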
func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier {
	platform := req.builder.platform
	if platform == nil {
		// May be nil if not explicitly set in API/dockerfile
		platform = &specs.Platform{}
	}
	if platform.OS == "" {
		// Default to the dispatch request's operating system if not explicit in API/dockerfile
		platform.OS = req.state.operatingSystem
	}
	if platform.OS == "" {
		// This is a failsafe just in case. Shouldn't be hit.
		platform.OS = runtime.GOOS
	}

	return copier{
		source:      req.source,
		pathCache:   req.builder.pathCache,
		download:    download,
		imageSource: imageSource,
		platform:    platform,
	}
}
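// createCopyInstruction resolves the sources and destination of a COPY or ADD
// command into a copyInstruction that Builder.performCopy can execute.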
func (o *copier) createCopyInstruction(sourcesAndDest instructions.SourcesAndDest, cmdName string) (copyInstruction, error) {
	inst := copyInstruction{
		cmdName: cmdName,
		dest:    filepath.FromSlash(sourcesAndDest.DestPath),
	}
	infos, err := o.getCopyInfosForSourcePaths(sourcesAndDest.SourcePaths, inst.dest)
	if err != nil {
		return inst, errors.Wrapf(err, "%s failed", cmdName)
	}
	if len(infos) > 1 && !strings.HasSuffix(inst.dest, string(os.PathSeparator)) {
		return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}
	inst.infos = infos
	return inst, nil
}
// getCopyInfosForSourcePaths iterates over the source files and calculates the
// info needed to copy (e.g. hash value if cached)
// The dest is used in case the source is a URL (and ends with "/")
func (o *copier) getCopyInfosForSourcePaths(sources []string, dest string) ([]copyInfo, error) {
	var infos []copyInfo
	for _, orig := range sources {
		subinfos, err := o.getCopyInfoForSourcePath(orig, dest)
		if err != nil {
			return nil, err
		}
		infos = append(infos, subinfos...)
	}

	if len(infos) == 0 {
		return nil, errors.New("no source files were specified")
	}
	return infos, nil
}
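// getCopyInfoForSourcePath resolves a single source: local paths are resolved
// against the build context, while URLs are fetched with the configured
// downloader and never decompressed, even for ADD.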
func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error) {
	if !urlutil.IsURL(orig) {
		return o.calcCopyInfo(orig, true)
	}

	remote, path, err := o.download(orig)
	if err != nil {
		return nil, err
	}
	// If path == "" then we are unable to determine filename from src
	// We have to make sure dest is available
	if path == "" {
		if strings.HasSuffix(dest, "/") {
			return nil, errors.Errorf("cannot determine filename for source %s", orig)
		}
		path = unnamedFilename
	}
	o.tmpPaths = append(o.tmpPaths, remote.Root().Path())

	hash, err := remote.Hash(path)
	ci := newCopyInfoFromSource(remote, path, hash)
	ci.noDecompress = true // data from http shouldn't be extracted even on ADD
	return newCopyInfos(ci), err
}
// Cleanup removes any temporary directories created as part of downloading
// remote files.
func (o *copier) Cleanup() {
	for _, path := range o.tmpPaths {
		os.RemoveAll(path)
	}
	o.tmpPaths = []string{}
	if o.activeLayer != nil {
		o.activeLayer.Release()
		o.activeLayer = nil
	}
}
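// calcCopyInfo resolves origPath inside the build context (or the image mount
// when copying with --from), expanding wildcards and reusing cached hashes
// where possible.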
// TODO: allowWildcards can probably be removed by refactoring this function further.
func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) {
	imageSource := o.imageSource
	if err := validateCopySourcePath(imageSource, origPath); err != nil {
		return nil, err
	}

	// TODO: do this when creating copier. Requires validateCopySourcePath
	// (and other below) to be aware of the different sources. Why is it only
	// done on image Source?
	if imageSource != nil && o.activeLayer == nil {
		// this needs to be protected against repeated calls as wildcard copy
		// will call it multiple times for a single COPY
		var err error
		rwLayer, err := imageSource.NewRWLayer()
		if err != nil {
			return nil, err
		}
		o.activeLayer = rwLayer

		o.source, err = remotecontext.NewLazySource(rwLayer.Root())
		if err != nil {
			return nil, errors.Wrapf(err, "failed to create context for copy from %s", rwLayer.Root().Path())
		}
	}

	if o.source == nil {
		return nil, errors.Errorf("missing build context")
	}

	// Work in daemon-specific OS filepath semantics
	origPath = filepath.FromSlash(origPath)
	origPath = strings.TrimPrefix(origPath, string(os.PathSeparator))
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath) {
		return o.copyWithWildcards(origPath)
	}

	if imageSource != nil && imageSource.ImageID() != "" {
		// return a cached copy if one exists
		if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok {
			return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil
		}
	}

	// Deal with the single file case
	copyInfo, err := copyInfoForFile(o.source, origPath)
	switch {
	case imageSource == nil && errors.Is(err, os.ErrNotExist):
		return nil, errors.Wrapf(err, "file not found in build context or excluded by .dockerignore")
	case err != nil:
		return nil, err
	case copyInfo.hash != "":
		o.storeInPathCache(imageSource, origPath, copyInfo.hash)
		return newCopyInfos(copyInfo), err
	}

	// TODO: remove, handle dirs in Hash()
	subfiles, err := walkSource(o.source, origPath)
	if err != nil {
		return nil, err
	}

	hash := hashStringSlice("dir", subfiles)
	o.storeInPathCache(imageSource, origPath, hash)
	return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil
}
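// storeInPathCache records the hash for a path when copying from an image
// source, so later identical COPY instructions can be served from cache.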
func (o *copier) storeInPathCache(im *imageMount, path string, hash string) {
	if im != nil {
		o.pathCache.Store(im.ImageID()+path, hash)
	}
}
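// copyWithWildcards walks the source tree and collects copy info for every
// path matching the wildcard pattern in origPath.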
func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
	root := o.source.Root()
	var copyInfos []copyInfo
	if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := remotecontext.Rel(root, path)
		if err != nil {
			return err
		}

		if rel == "." {
			return nil
		}
		if match, _ := root.Match(origPath, rel); !match {
			return nil
		}

		// Note we set allowWildcards to false in case the name has
		// a * in it
		subInfos, err := o.calcCopyInfo(rel, false)
		if err != nil {
			return err
		}
		copyInfos = append(copyInfos, subInfos...)
		return nil
	}); err != nil {
		return nil, err
	}
	return copyInfos, nil
}
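// copyInfoForFile stats a single path in the build context and, if it is a
// regular file, returns its copyInfo with the content hash; directories are
// signalled by an empty hash so the caller can walk them instead.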
func copyInfoForFile(source builder.Source, path string) (copyInfo, error) {
	fi, err := remotecontext.StatAt(source, path)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			// return the relative path in the error, which is more user-friendly than the full path to the tmp-dir
			return copyInfo{}, errors.WithStack(&os.PathError{Op: "stat", Path: path, Err: os.ErrNotExist})
		}
		return copyInfo{}, err
	}

	if fi.IsDir() {
		return copyInfo{}, nil
	}
	hash, err := source.Hash(path)
	if err != nil {
		return copyInfo{}, err
	}
	return newCopyInfoFromSource(source, path, "file:"+hash), nil
}
// TODO: dedupe with copyWithWildcards()
func walkSource(source builder.Source, origPath string) ([]string, error) {
	fp, err := remotecontext.FullPath(source, origPath)
	if err != nil {
		return nil, err
	}
	// Must be a dir
	var subfiles []string
	err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := remotecontext.Rel(source.Root(), path)
		if err != nil {
			return err
		}
		if rel == "." {
			return nil
		}
		hash, err := source.Hash(rel)
		if err != nil {
			return nil
		}
		// we already checked handleHash above
		subfiles = append(subfiles, hash)
		return nil
	})
	if err != nil {
		return nil, err
	}

	sort.Strings(subfiles)
	return subfiles, nil
}
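// sourceDownloader fetches a remote URL into a temporary build context and
// returns it together with the filename to copy, if one could be determined.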
type sourceDownloader func(string) (builder.Source, string, error)

func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader {
	return func(url string) (builder.Source, string, error) {
		return downloadSource(output, stdout, url)
	}
}

func errOnSourceDownload(_ string) (builder.Source, string, error) {
	return nil, "", errors.New("source can't be a URL for COPY")
}
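// getFilenameForDownload guesses the destination filename for a download,
// first from the URL path and then from the Content-Disposition header,
// returning "" if neither yields a usable name.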
func getFilenameForDownload(path string, resp *http.Response) string {
	// Guess filename based on source
	if path != "" && !strings.HasSuffix(path, "/") {
		if filename := filepath.Base(filepath.FromSlash(path)); filename != "" {
			return filename
		}
	}

	// Guess filename based on Content-Disposition
	if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" {
		if _, params, err := mime.ParseMediaType(contentDisposition); err == nil {
			if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") {
				if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" {
					return filename
				}
			}
		}
	}
	return ""
}
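// downloadSource fetches srcURL into a temporary directory, streaming progress
// to output, and returns that directory as a lazy build context along with the
// detected filename.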
func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) {
	u, err := url.Parse(srcURL)
	if err != nil {
		return
	}

	resp, err := remotecontext.GetWithStatusError(srcURL)
	if err != nil {
		return
	}

	filename := getFilenameForDownload(u.Path, resp)

	// Prepare file in a tmp dir
	tmpDir, err := ioutils.TempDir("", "docker-remote")
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			os.RemoveAll(tmpDir)
		}
	}()
	// If filename is empty, the returned filename will be "" but
	// the tmp filename will be created as "__unnamed__"
	tmpFileName := filename
	if filename == "" {
		tmpFileName = unnamedFilename
	}
	tmpFileName = filepath.Join(tmpDir, tmpFileName)
	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		return
	}

	progressOutput := streamformatter.NewJSONProgressOutput(output, true)
	progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")
	// Download and dump result to tmp file
	// TODO: add filehash directly
	if _, err = io.Copy(tmpFile, progressReader); err != nil {
		tmpFile.Close()
		return
	}
	// TODO: how important is this random blank line to the output?
	fmt.Fprintln(stdout)

	// Set the mtime to the Last-Modified header value if present
	// Otherwise just remove atime and mtime
	mTime := time.Time{}

	lastMod := resp.Header.Get("Last-Modified")
	if lastMod != "" {
		// If we can't parse it then just let it default to 'zero'
		// otherwise use the parsed time value
		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
			mTime = parsedMTime
		}
	}

	tmpFile.Close()

	if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
		return
	}

	lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir))
	return lc, filename, err
}
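// copyFileOptions carries the per-instruction settings used by
// performCopyForInfo: whether to decompress archives, which identity to chown
// to, and the archiver to copy with.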
type copyFileOptions struct {
	decompress bool
	identity   *idtools.Identity
	archiver   Archiver
}
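// copyEndpoint pairs a filesystem driver with a path on that filesystem.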
type copyEndpoint struct {
	driver containerfs.Driver
	path   string
}
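// performCopyForInfo copies a single resolved source into the destination,
// dispatching to directory copy, archive extraction (for ADD), or plain file
// copy as appropriate.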
func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error {
	srcPath, err := source.fullPath()
	if err != nil {
		return err
	}

	destPath, err := dest.fullPath()
	if err != nil {
		return err
	}

	archiver := options.archiver

	srcEndpoint := &copyEndpoint{driver: source.root, path: srcPath}
	destEndpoint := &copyEndpoint{driver: dest.root, path: destPath}

	src, err := source.root.Stat(srcPath)
	if err != nil {
		return errors.Wrapf(err, "source path not found")
	}
	if src.IsDir() {
		return copyDirectory(archiver, srcEndpoint, destEndpoint, options.identity)
	}
	if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress {
		return archiver.UntarPath(srcPath, destPath)
	}

	destExistsAsDir, err := isExistingDirectory(destEndpoint)
	if err != nil {
		return err
	}
	// dest.path must be used because destPath has already been cleaned of any
	// trailing slash
	if endsInSlash(dest.root, dest.path) || destExistsAsDir {
		// source.path must be used to get the correct filename when the source
		// is a symlink
		destPath = dest.root.Join(destPath, source.root.Base(source.path))
		destEndpoint = &copyEndpoint{driver: dest.root, path: destPath}
	}
	return copyFile(archiver, srcEndpoint, destEndpoint, options.identity)
}
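// isArchivePath reports whether path points to a (possibly compressed) tar
// archive by attempting to read its first tar header.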
func isArchivePath(driver containerfs.ContainerFS, path string) bool {
	file, err := driver.Open(path)
	if err != nil {
		return false
	}
	defer file.Close()
	rdr, err := archive.DecompressStream(file)
	if err != nil {
		return false
	}
	r := tar.NewReader(rdr)
	_, err = r.Next()
	return err == nil
}
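// copyDirectory copies a directory tree to dest and, when an identity is
// given, fixes ownership on the copied files.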
func copyDirectory(archiver Archiver, source, dest *copyEndpoint, identity *idtools.Identity) error {
	destExists, err := isExistingDirectory(dest)
	if err != nil {
		return errors.Wrapf(err, "failed to query destination path")
	}

	if err := archiver.CopyWithTar(source.path, dest.path); err != nil {
		return errors.Wrapf(err, "failed to copy directory")
	}
	if identity != nil {
		return fixPermissions(source.path, dest.path, *identity, !destExists)
	}
	return nil
}
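// copyFile copies a single file to dest, creating the parent directory first
// and fixing ownership when an identity is given.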
func copyFile(archiver Archiver, source, dest *copyEndpoint, identity *idtools.Identity) error {
	if identity == nil {
		// Use system.MkdirAll here, which is a custom version of os.MkdirAll
		// modified for use on Windows to handle volume GUID paths. These paths
		// are of the form \\?\Volume{<GUID>}\<path>. An example would be:
		// \\?\Volume{dae8d3ac-b9a1-11e9-88eb-e8554b2ba1db}\bin\busybox.exe
		if err := system.MkdirAll(filepath.Dir(dest.path), 0755); err != nil {
			return err
		}
	} else {
		if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, *identity); err != nil {
			return errors.Wrapf(err, "failed to create new directory")
		}
	}

	if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil {
		return errors.Wrapf(err, "failed to copy file")
	}
	if identity != nil {
		return fixPermissions(source.path, dest.path, *identity, false)
	}
	return nil
}
func endsInSlash(driver containerfs.Driver, path string) bool {
	return strings.HasSuffix(path, string(driver.Separator()))
}

// isExistingDirectory returns true if the path exists and is a directory
func isExistingDirectory(point *copyEndpoint) (bool, error) {
	destStat, err := point.driver.Stat(point.path)
	switch {
	case errors.Is(err, os.ErrNotExist):
		return false, nil
	case err != nil:
		return false, err
	}
	return destStat.IsDir(), nil
}