2017-05-07 14:37:46 -04:00
|
|
|
package dockerfile
|
|
|
|
|
|
|
|
import (
|
2017-08-03 20:22:00 -04:00
|
|
|
"archive/tar"
|
2017-05-07 14:37:46 -04:00
|
|
|
"fmt"
|
|
|
|
"io"
|
2017-07-21 19:54:29 -04:00
|
|
|
"mime"
|
2017-05-07 14:37:46 -04:00
|
|
|
"net/http"
|
|
|
|
"net/url"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2017-08-03 20:22:00 -04:00
|
|
|
"runtime"
|
2017-05-07 14:37:46 -04:00
|
|
|
"sort"
|
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/docker/docker/builder"
|
|
|
|
"github.com/docker/docker/builder/remotecontext"
|
2017-05-14 14:18:48 -04:00
|
|
|
"github.com/docker/docker/pkg/archive"
|
2017-08-03 20:22:00 -04:00
|
|
|
"github.com/docker/docker/pkg/containerfs"
|
2017-05-14 14:18:48 -04:00
|
|
|
"github.com/docker/docker/pkg/idtools"
|
2017-05-07 14:37:46 -04:00
|
|
|
"github.com/docker/docker/pkg/ioutils"
|
|
|
|
"github.com/docker/docker/pkg/progress"
|
|
|
|
"github.com/docker/docker/pkg/streamformatter"
|
|
|
|
"github.com/docker/docker/pkg/system"
|
|
|
|
"github.com/docker/docker/pkg/urlutil"
|
|
|
|
"github.com/pkg/errors"
|
|
|
|
)
|
|
|
|
|
2017-07-21 19:54:29 -04:00
|
|
|
// unnamedFilename is the placeholder filename used to stage a remote download
// when no filename can be determined from the URL or the HTTP response.
const unnamedFilename = "__unnamed__"
|
|
|
|
|
2017-05-07 14:37:46 -04:00
|
|
|
// pathCache is a minimal Load/Store cache used to remember the content hash
// computed for a path, keyed by imageID+path (see storeInPathCache).
type pathCache interface {
	// Load returns the value stored for key and whether it was present.
	Load(key interface{}) (value interface{}, ok bool)
	// Store records value under key.
	Store(key, value interface{})
}
|
|
|
|
|
|
|
|
// copyInfo is a data object which stores the metadata about each source file in
// a copyInstruction
type copyInfo struct {
	root         containerfs.ContainerFS // filesystem the path is resolved against
	path         string                  // path relative to root
	hash         string                  // content hash, used for build-cache keys
	noDecompress bool                    // when true, ADD must not extract this source (set for HTTP downloads)
}
|
|
|
|
|
2017-05-14 14:18:48 -04:00
|
|
|
func (c copyInfo) fullPath() (string, error) {
|
2017-08-03 20:22:00 -04:00
|
|
|
return c.root.ResolveScopedPath(c.path, true)
|
2017-05-14 14:18:48 -04:00
|
|
|
}
|
|
|
|
|
2017-05-07 14:37:46 -04:00
|
|
|
func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo {
|
|
|
|
return copyInfo{root: source.Root(), path: path, hash: hash}
|
|
|
|
}
|
|
|
|
|
|
|
|
// newCopyInfos wraps one or more copyInfo values as a slice; it exists purely
// for readability at call sites.
func newCopyInfos(copyInfos ...copyInfo) []copyInfo {
	return copyInfos
}
|
|
|
|
|
|
|
|
// copyInstruction is a fully parsed COPY or ADD command that is passed to
// Builder.performCopy to copy files into the image filesystem
type copyInstruction struct {
	cmdName                 string     // "COPY" or "ADD"; used in error messages
	infos                   []copyInfo // resolved source files
	dest                    string     // destination path, in platform-specific separators
	chownStr                string     // ownership (chown) specification string, if provided
	allowLocalDecompression bool       // whether local archive sources may be extracted on copy
}
|
|
|
|
|
|
|
|
// copier reads a raw COPY or ADD command, fetches remote sources using a downloader,
// and creates a copyInstruction
type copier struct {
	imageSource *imageMount      // non-nil when copying from another image's filesystem
	source      builder.Source   // filesystem sources are resolved against
	pathCache   pathCache        // cache of previously computed path hashes
	download    sourceDownloader // fetches URL sources
	tmpPaths    []string         // temp dirs created for downloads; removed by Cleanup
	platform    string           // platform governing filepath semantics of dest paths
}
|
|
|
|
|
|
|
|
func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier {
|
|
|
|
return copier{
|
|
|
|
source: req.source,
|
|
|
|
pathCache: req.builder.pathCache,
|
|
|
|
download: download,
|
|
|
|
imageSource: imageSource,
|
2017-08-03 20:22:00 -04:00
|
|
|
platform: req.builder.platform,
|
2017-05-07 14:37:46 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstruction, error) {
|
|
|
|
inst := copyInstruction{cmdName: cmdName}
|
|
|
|
last := len(args) - 1
|
|
|
|
|
2017-08-03 20:22:00 -04:00
|
|
|
// Work in platform-specific filepath semantics
|
|
|
|
inst.dest = fromSlash(args[last], o.platform)
|
|
|
|
separator := string(separator(o.platform))
|
2017-07-21 19:54:29 -04:00
|
|
|
infos, err := o.getCopyInfosForSourcePaths(args[0:last], inst.dest)
|
2017-05-07 14:37:46 -04:00
|
|
|
if err != nil {
|
|
|
|
return inst, errors.Wrapf(err, "%s failed", cmdName)
|
|
|
|
}
|
2017-08-03 20:22:00 -04:00
|
|
|
if len(infos) > 1 && !strings.HasSuffix(inst.dest, separator) {
|
2017-05-07 14:37:46 -04:00
|
|
|
return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
|
|
|
|
}
|
|
|
|
inst.infos = infos
|
|
|
|
return inst, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// getCopyInfosForSourcePaths iterates over the source files and calculate the info
|
|
|
|
// needed to copy (e.g. hash value if cached)
|
2017-07-21 19:54:29 -04:00
|
|
|
// The dest is used in case source is URL (and ends with "/")
|
|
|
|
func (o *copier) getCopyInfosForSourcePaths(sources []string, dest string) ([]copyInfo, error) {
|
2017-05-07 14:37:46 -04:00
|
|
|
var infos []copyInfo
|
|
|
|
for _, orig := range sources {
|
2017-07-21 19:54:29 -04:00
|
|
|
subinfos, err := o.getCopyInfoForSourcePath(orig, dest)
|
2017-05-07 14:37:46 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
infos = append(infos, subinfos...)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(infos) == 0 {
|
|
|
|
return nil, errors.New("no source files were specified")
|
|
|
|
}
|
|
|
|
return infos, nil
|
|
|
|
}
|
|
|
|
|
2017-07-21 19:54:29 -04:00
|
|
|
func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error) {
|
2017-05-07 14:37:46 -04:00
|
|
|
if !urlutil.IsURL(orig) {
|
|
|
|
return o.calcCopyInfo(orig, true)
|
|
|
|
}
|
2017-08-03 20:22:00 -04:00
|
|
|
|
2017-05-07 14:37:46 -04:00
|
|
|
remote, path, err := o.download(orig)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-07-21 19:54:29 -04:00
|
|
|
// If path == "" then we are unable to determine filename from src
|
|
|
|
// We have to make sure dest is available
|
|
|
|
if path == "" {
|
|
|
|
if strings.HasSuffix(dest, "/") {
|
|
|
|
return nil, errors.Errorf("cannot determine filename for source %s", orig)
|
|
|
|
}
|
|
|
|
path = unnamedFilename
|
|
|
|
}
|
2017-08-03 20:22:00 -04:00
|
|
|
o.tmpPaths = append(o.tmpPaths, remote.Root().Path())
|
2017-05-07 14:37:46 -04:00
|
|
|
|
|
|
|
hash, err := remote.Hash(path)
|
2017-06-27 13:38:01 -04:00
|
|
|
ci := newCopyInfoFromSource(remote, path, hash)
|
|
|
|
ci.noDecompress = true // data from http shouldn't be extracted even on ADD
|
|
|
|
return newCopyInfos(ci), err
|
2017-05-07 14:37:46 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Cleanup removes any temporary directories created as part of downloading
|
|
|
|
// remote files.
|
|
|
|
func (o *copier) Cleanup() {
|
|
|
|
for _, path := range o.tmpPaths {
|
|
|
|
os.RemoveAll(path)
|
|
|
|
}
|
|
|
|
o.tmpPaths = []string{}
|
|
|
|
}
|
|
|
|
|
|
|
|
// calcCopyInfo resolves origPath (a non-URL source) against the active source
// filesystem and returns the copyInfo entries (path + content hash) needed to
// perform the copy. Wildcard patterns expand to multiple entries; a directory
// is hashed from the sorted hashes of the files it contains.
// TODO: allowWildcards can probably be removed by refactoring this function further.
func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) {
	imageSource := o.imageSource

	// TODO: do this when creating copier. Requires validateCopySourcePath
	// (and other below) to be aware of the difference sources. Why is it only
	// done on image Source?
	if imageSource != nil {
		// Copying from an image: switch the copier's source to the image's
		// mounted filesystem.
		var err error
		o.source, err = imageSource.Source()
		if err != nil {
			return nil, errors.Wrapf(err, "failed to copy from %s", imageSource.ImageID())
		}
	}

	if o.source == nil {
		return nil, errors.Errorf("missing build context")
	}

	root := o.source.Root()

	if err := validateCopySourcePath(imageSource, origPath, root.OS()); err != nil {
		return nil, err
	}

	// Work in source OS specific filepath semantics
	// For LCOW, this is NOT the daemon OS.
	origPath = root.FromSlash(origPath)
	// Normalize away a leading separator or "./" so the path is relative to root.
	origPath = strings.TrimPrefix(origPath, string(root.Separator()))
	origPath = strings.TrimPrefix(origPath, "."+string(root.Separator()))

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath, root.OS()) {
		return o.copyWithWildcards(origPath)
	}

	if imageSource != nil && imageSource.ImageID() != "" {
		// return a cached copy if one exists
		if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok {
			return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil
		}
	}

	// Deal with the single file case
	copyInfo, err := copyInfoForFile(o.source, origPath)
	switch {
	case err != nil:
		return nil, err
	case copyInfo.hash != "":
		// Non-empty hash means origPath is a regular file (copyInfoForFile
		// returns an empty copyInfo for directories): cache and return it.
		o.storeInPathCache(imageSource, origPath, copyInfo.hash)
		return newCopyInfos(copyInfo), err
	}

	// origPath is a directory: aggregate the hashes of every file beneath it.
	// TODO: remove, handle dirs in Hash()
	subfiles, err := walkSource(o.source, origPath)
	if err != nil {
		return nil, err
	}

	hash := hashStringSlice("dir", subfiles)
	o.storeInPathCache(imageSource, origPath, hash)
	return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil
}
|
|
|
|
|
2017-08-03 20:22:00 -04:00
|
|
|
// containsWildcards reports whether name holds an unescaped wildcard
// character (*, ?, or [). On non-Windows platforms a backslash escapes the
// character that follows it; on Windows the backslash is a path separator
// and never escapes.
func containsWildcards(name, platform string) bool {
	escapesAllowed := platform != "windows"
	skip := false
	for i := 0; i < len(name); i++ {
		if skip {
			skip = false
			continue
		}
		switch name[i] {
		case '\\':
			skip = escapesAllowed
		case '*', '?', '[':
			return true
		}
	}
	return false
}
|
|
|
|
|
2017-05-07 14:37:46 -04:00
|
|
|
func (o *copier) storeInPathCache(im *imageMount, path string, hash string) {
|
|
|
|
if im != nil {
|
|
|
|
o.pathCache.Store(im.ImageID()+path, hash)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// copyWithWildcards walks the whole source tree and collects copy info for
// every path matching the wildcard pattern origPath. Each match is resolved
// through calcCopyInfo with wildcards disabled, so literal *, ?, or [ in a
// matched name is not expanded again.
func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
	root := o.source.Root()
	var copyInfos []copyInfo
	if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Match against the path relative to the source root.
		rel, err := remotecontext.Rel(root, path)
		if err != nil {
			return err
		}

		// Skip the root itself.
		if rel == "." {
			return nil
		}
		if match, _ := root.Match(origPath, rel); !match {
			return nil
		}

		// Note we set allowWildcards to false in case the name has
		// a * in it
		subInfos, err := o.calcCopyInfo(rel, false)
		if err != nil {
			return err
		}
		copyInfos = append(copyInfos, subInfos...)
		return nil
	}); err != nil {
		return nil, err
	}
	return copyInfos, nil
}
|
|
|
|
|
|
|
|
func copyInfoForFile(source builder.Source, path string) (copyInfo, error) {
|
|
|
|
fi, err := remotecontext.StatAt(source, path)
|
|
|
|
if err != nil {
|
|
|
|
return copyInfo{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if fi.IsDir() {
|
|
|
|
return copyInfo{}, nil
|
|
|
|
}
|
|
|
|
hash, err := source.Hash(path)
|
|
|
|
if err != nil {
|
|
|
|
return copyInfo{}, err
|
|
|
|
}
|
|
|
|
return newCopyInfoFromSource(source, path, "file:"+hash), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// walkSource returns the sorted content hashes of every file underneath
// origPath in the source tree, used to build a stable aggregate hash for a
// directory source.
// TODO: dedupe with copyWithWildcards()
func walkSource(source builder.Source, origPath string) ([]string, error) {
	fp, err := remotecontext.FullPath(source, origPath)
	if err != nil {
		return nil, err
	}
	// Must be a dir
	var subfiles []string
	err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := remotecontext.Rel(source.Root(), path)
		if err != nil {
			return err
		}
		// Skip the directory itself.
		if rel == "." {
			return nil
		}
		hash, err := source.Hash(rel)
		if err != nil {
			// Deliberately skip entries that fail to hash instead of
			// aborting the walk.
			return nil
		}
		// we already checked handleHash above
		subfiles = append(subfiles, hash)
		return nil
	})
	if err != nil {
		return nil, err
	}

	// Sort so the aggregate hash is independent of walk order.
	sort.Strings(subfiles)
	return subfiles, nil
}
|
|
|
|
|
|
|
|
// sourceDownloader fetches a URL and returns the staged content as a
// builder.Source along with the detected filename ("" when the name cannot
// be determined).
type sourceDownloader func(string) (builder.Source, string, error)
|
|
|
|
|
|
|
|
func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader {
|
|
|
|
return func(url string) (builder.Source, string, error) {
|
|
|
|
return downloadSource(output, stdout, url)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func errOnSourceDownload(_ string) (builder.Source, string, error) {
|
|
|
|
return nil, "", errors.New("source can't be a URL for COPY")
|
|
|
|
}
|
|
|
|
|
2017-07-21 19:54:29 -04:00
|
|
|
func getFilenameForDownload(path string, resp *http.Response) string {
|
|
|
|
// Guess filename based on source
|
|
|
|
if path != "" && !strings.HasSuffix(path, "/") {
|
|
|
|
if filename := filepath.Base(filepath.FromSlash(path)); filename != "" {
|
|
|
|
return filename
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Guess filename based on Content-Disposition
|
|
|
|
if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" {
|
|
|
|
if _, params, err := mime.ParseMediaType(contentDisposition); err == nil {
|
|
|
|
if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") {
|
|
|
|
if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" {
|
|
|
|
return filename
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
2017-05-07 14:37:46 -04:00
|
|
|
func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) {
|
|
|
|
u, err := url.Parse(srcURL)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-06-01 17:05:44 -04:00
|
|
|
resp, err := remotecontext.GetWithStatusError(srcURL)
|
2017-05-07 14:37:46 -04:00
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-07-21 19:54:29 -04:00
|
|
|
filename := getFilenameForDownload(u.Path, resp)
|
|
|
|
|
2017-05-07 14:37:46 -04:00
|
|
|
// Prepare file in a tmp dir
|
|
|
|
tmpDir, err := ioutils.TempDir("", "docker-remote")
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
if err != nil {
|
|
|
|
os.RemoveAll(tmpDir)
|
|
|
|
}
|
|
|
|
}()
|
2017-07-21 19:54:29 -04:00
|
|
|
// If filename is empty, the returned filename will be "" but
|
|
|
|
// the tmp filename will be created as "__unnamed__"
|
|
|
|
tmpFileName := filename
|
|
|
|
if filename == "" {
|
|
|
|
tmpFileName = unnamedFilename
|
|
|
|
}
|
|
|
|
tmpFileName = filepath.Join(tmpDir, tmpFileName)
|
2017-05-07 14:37:46 -04:00
|
|
|
tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
progressOutput := streamformatter.NewJSONProgressOutput(output, true)
|
|
|
|
progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")
|
|
|
|
// Download and dump result to tmp file
|
|
|
|
// TODO: add filehash directly
|
|
|
|
if _, err = io.Copy(tmpFile, progressReader); err != nil {
|
|
|
|
tmpFile.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
// TODO: how important is this random blank line to the output?
|
|
|
|
fmt.Fprintln(stdout)
|
|
|
|
|
|
|
|
// Set the mtime to the Last-Modified header value if present
|
|
|
|
// Otherwise just remove atime and mtime
|
|
|
|
mTime := time.Time{}
|
|
|
|
|
|
|
|
lastMod := resp.Header.Get("Last-Modified")
|
|
|
|
if lastMod != "" {
|
|
|
|
// If we can't parse it then just let it default to 'zero'
|
|
|
|
// otherwise use the parsed time value
|
|
|
|
if parsedMTime, err := http.ParseTime(lastMod); err == nil {
|
|
|
|
mTime = parsedMTime
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
tmpFile.Close()
|
|
|
|
|
|
|
|
if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-08-03 20:22:00 -04:00
|
|
|
lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir))
|
2017-05-07 14:37:46 -04:00
|
|
|
return lc, filename, err
|
|
|
|
}
|
2017-05-14 14:18:48 -04:00
|
|
|
|
|
|
|
// copyFileOptions bundles the settings performCopyForInfo needs when copying
// a single source into the destination filesystem.
type copyFileOptions struct {
	decompress bool           // allow extracting archive sources (ADD semantics)
	chownPair  idtools.IDPair // uid/gid to assign to created files and directories
	archiver   Archiver       // performs the actual tar-based copy
}
|
|
|
|
|
|
|
|
// copyEndpoint pairs a filesystem driver with a path on that filesystem, so
// the copy helpers can handle source and destination that live on different
// drivers (e.g. LCOW, where the destination is a Linux filesystem).
type copyEndpoint struct {
	driver containerfs.Driver
	path   string
}
|
|
|
|
|
2017-05-25 17:03:29 -04:00
|
|
|
func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error {
|
2017-05-14 14:18:48 -04:00
|
|
|
srcPath, err := source.fullPath()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-08-03 20:22:00 -04:00
|
|
|
|
2017-05-14 14:18:48 -04:00
|
|
|
destPath, err := dest.fullPath()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
archiver := options.archiver
|
|
|
|
|
2017-08-03 20:22:00 -04:00
|
|
|
srcEndpoint := ©Endpoint{driver: source.root, path: srcPath}
|
|
|
|
destEndpoint := ©Endpoint{driver: dest.root, path: destPath}
|
|
|
|
|
|
|
|
src, err := source.root.Stat(srcPath)
|
2017-05-14 14:18:48 -04:00
|
|
|
if err != nil {
|
2017-05-25 16:02:29 -04:00
|
|
|
return errors.Wrapf(err, "source path not found")
|
2017-05-14 14:18:48 -04:00
|
|
|
}
|
|
|
|
if src.IsDir() {
|
2017-08-03 20:22:00 -04:00
|
|
|
return copyDirectory(archiver, srcEndpoint, destEndpoint, options.chownPair)
|
2017-05-14 14:18:48 -04:00
|
|
|
}
|
2017-08-03 20:22:00 -04:00
|
|
|
if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress {
|
2017-05-25 17:03:29 -04:00
|
|
|
return archiver.UntarPath(srcPath, destPath)
|
2017-05-14 14:18:48 -04:00
|
|
|
}
|
|
|
|
|
2017-08-03 20:22:00 -04:00
|
|
|
destExistsAsDir, err := isExistingDirectory(destEndpoint)
|
2017-05-25 17:03:29 -04:00
|
|
|
if err != nil {
|
2017-05-14 14:18:48 -04:00
|
|
|
return err
|
|
|
|
}
|
2017-05-25 17:03:29 -04:00
|
|
|
// dest.path must be used because destPath has already been cleaned of any
|
|
|
|
// trailing slash
|
2017-08-03 20:22:00 -04:00
|
|
|
if endsInSlash(dest.root, dest.path) || destExistsAsDir {
|
2017-05-25 17:03:29 -04:00
|
|
|
// source.path must be used to get the correct filename when the source
|
|
|
|
// is a symlink
|
2017-08-03 20:22:00 -04:00
|
|
|
destPath = dest.root.Join(destPath, source.root.Base(source.path))
|
|
|
|
destEndpoint = ©Endpoint{driver: dest.root, path: destPath}
|
2017-05-25 17:03:29 -04:00
|
|
|
}
|
2017-08-03 20:22:00 -04:00
|
|
|
return copyFile(archiver, srcEndpoint, destEndpoint, options.chownPair)
|
2017-05-25 17:03:29 -04:00
|
|
|
}
|
|
|
|
|
2017-08-03 20:22:00 -04:00
|
|
|
func isArchivePath(driver containerfs.ContainerFS, path string) bool {
|
|
|
|
file, err := driver.Open(path)
|
|
|
|
if err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
defer file.Close()
|
|
|
|
rdr, err := archive.DecompressStream(file)
|
|
|
|
if err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
r := tar.NewReader(rdr)
|
|
|
|
_, err = r.Next()
|
|
|
|
return err == nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func copyDirectory(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error {
|
2017-07-26 12:05:55 -04:00
|
|
|
destExists, err := isExistingDirectory(dest)
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrapf(err, "failed to query destination path")
|
|
|
|
}
|
2017-08-03 20:22:00 -04:00
|
|
|
|
|
|
|
if err := archiver.CopyWithTar(source.path, dest.path); err != nil {
|
2017-05-25 17:03:29 -04:00
|
|
|
return errors.Wrapf(err, "failed to copy directory")
|
|
|
|
}
|
2017-08-03 20:22:00 -04:00
|
|
|
// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
|
|
|
|
return fixPermissions(source.path, dest.path, chownPair, !destExists)
|
2017-05-25 17:03:29 -04:00
|
|
|
}
|
|
|
|
|
2017-08-03 20:22:00 -04:00
|
|
|
func copyFile(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error {
|
|
|
|
if runtime.GOOS == "windows" && dest.driver.OS() == "linux" {
|
|
|
|
// LCOW
|
|
|
|
if err := dest.driver.MkdirAll(dest.driver.Dir(dest.path), 0755); err != nil {
|
|
|
|
return errors.Wrapf(err, "failed to create new directory")
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, chownPair); err != nil {
|
|
|
|
// Normal containers
|
|
|
|
return errors.Wrapf(err, "failed to create new directory")
|
|
|
|
}
|
2017-05-25 17:03:29 -04:00
|
|
|
}
|
2017-08-03 20:22:00 -04:00
|
|
|
|
|
|
|
if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil {
|
2017-05-25 17:03:29 -04:00
|
|
|
return errors.Wrapf(err, "failed to copy file")
|
|
|
|
}
|
2017-08-03 20:22:00 -04:00
|
|
|
// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
|
|
|
|
return fixPermissions(source.path, dest.path, chownPair, false)
|
2017-05-25 17:03:29 -04:00
|
|
|
}
|
|
|
|
|
2017-08-03 20:22:00 -04:00
|
|
|
func endsInSlash(driver containerfs.Driver, path string) bool {
|
|
|
|
return strings.HasSuffix(path, string(driver.Separator()))
|
2017-05-25 17:03:29 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// isExistingDirectory returns true if the path exists and is a directory
|
2017-08-03 20:22:00 -04:00
|
|
|
func isExistingDirectory(point *copyEndpoint) (bool, error) {
|
|
|
|
destStat, err := point.driver.Stat(point.path)
|
2017-05-25 17:03:29 -04:00
|
|
|
switch {
|
|
|
|
case os.IsNotExist(err):
|
|
|
|
return false, nil
|
|
|
|
case err != nil:
|
|
|
|
return false, err
|
2017-05-14 14:18:48 -04:00
|
|
|
}
|
2017-05-25 17:03:29 -04:00
|
|
|
return destStat.IsDir(), nil
|
2017-05-14 14:18:48 -04:00
|
|
|
}
|