2013-03-21 20:47:23 -04:00
|
|
|
package docker
|
2013-03-11 08:42:36 -04:00
|
|
|
|
|
|
|
import (
|
2013-06-20 23:20:16 -04:00
|
|
|
"archive/tar"
|
|
|
|
"bytes"
|
2013-05-28 16:37:49 -04:00
|
|
|
"fmt"
|
2013-06-18 23:28:49 -04:00
|
|
|
"github.com/dotcloud/docker/utils"
|
2013-03-11 08:42:36 -04:00
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
2013-04-21 17:23:55 -04:00
|
|
|
"os"
|
2013-03-11 08:42:36 -04:00
|
|
|
"os/exec"
|
2013-06-18 23:28:49 -04:00
|
|
|
"path"
|
2013-06-20 23:20:16 -04:00
|
|
|
"path/filepath"
|
2013-03-11 08:42:36 -04:00
|
|
|
)
|
|
|
|
|
2013-03-18 03:15:35 -04:00
|
|
|
// Archive is a stream of bytes carrying a (possibly compressed) tar archive.
type Archive io.Reader
|
|
|
|
|
2013-03-11 08:42:36 -04:00
|
|
|
// Compression identifies the compression algorithm applied to an archive
// (see the Uncompressed/Bzip2/Gzip/Xz constants below).
type Compression uint32
|
|
|
|
|
|
|
|
const (
|
|
|
|
Uncompressed Compression = iota
|
|
|
|
Bzip2
|
|
|
|
Gzip
|
2013-04-01 19:16:28 -04:00
|
|
|
Xz
|
2013-03-11 08:42:36 -04:00
|
|
|
)
|
|
|
|
|
2013-06-13 20:53:38 -04:00
|
|
|
func DetectCompression(source []byte) Compression {
|
|
|
|
sourceLen := len(source)
|
|
|
|
for compression, m := range map[Compression][]byte{
|
|
|
|
Bzip2: {0x42, 0x5A, 0x68},
|
|
|
|
Gzip: {0x1F, 0x8B, 0x08},
|
|
|
|
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
|
|
|
|
} {
|
|
|
|
fail := false
|
|
|
|
if len(m) > sourceLen {
|
|
|
|
utils.Debugf("Len too short")
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
i := 0
|
|
|
|
for _, b := range m {
|
|
|
|
if b != source[i] {
|
|
|
|
fail = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
if !fail {
|
|
|
|
return compression
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return Uncompressed
|
|
|
|
}
|
|
|
|
|
2013-03-11 08:42:36 -04:00
|
|
|
func (compression *Compression) Flag() string {
|
|
|
|
switch *compression {
|
|
|
|
case Bzip2:
|
|
|
|
return "j"
|
|
|
|
case Gzip:
|
|
|
|
return "z"
|
2013-04-01 19:16:28 -04:00
|
|
|
case Xz:
|
|
|
|
return "J"
|
2013-03-11 08:42:36 -04:00
|
|
|
}
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
2013-05-28 16:37:49 -04:00
|
|
|
func (compression *Compression) Extension() string {
|
|
|
|
switch *compression {
|
|
|
|
case Uncompressed:
|
|
|
|
return "tar"
|
|
|
|
case Bzip2:
|
|
|
|
return "tar.bz2"
|
|
|
|
case Gzip:
|
|
|
|
return "tar.gz"
|
|
|
|
case Xz:
|
|
|
|
return "tar.xz"
|
|
|
|
}
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
2013-06-18 23:28:49 -04:00
|
|
|
// Tar creates an archive from the directory at `path`, and returns it as a
|
|
|
|
// stream of bytes.
|
2013-03-11 08:42:36 -04:00
|
|
|
func Tar(path string, compression Compression) (io.Reader, error) {
|
2013-06-18 23:28:49 -04:00
|
|
|
return TarFilter(path, compression, nil)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tar creates an archive from the directory at `path`, only including files whose relative
|
|
|
|
// paths are included in `filter`. If `filter` is nil, then all files are included.
|
|
|
|
func TarFilter(path string, compression Compression, filter []string) (io.Reader, error) {
|
2013-06-22 11:53:10 -04:00
|
|
|
args := []string{"tar", "--numeric-owner", "-f", "-", "-C", path}
|
2013-06-18 23:28:49 -04:00
|
|
|
if filter == nil {
|
|
|
|
filter = []string{"."}
|
|
|
|
}
|
|
|
|
for _, f := range filter {
|
|
|
|
args = append(args, "-c"+compression.Flag(), f)
|
|
|
|
}
|
2013-06-19 13:31:50 -04:00
|
|
|
return CmdStream(exec.Command(args[0], args[1:]...))
|
2013-03-11 08:42:36 -04:00
|
|
|
}
|
|
|
|
|
2013-06-18 23:28:49 -04:00
|
|
|
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
|
|
|
|
// and unpacks it into the directory at `path`.
|
|
|
|
// The archive may be compressed with one of the following algorithgms:
|
|
|
|
// identity (uncompressed), gzip, bzip2, xz.
|
2013-06-14 19:43:39 -04:00
|
|
|
// FIXME: specify behavior when target path exists vs. doesn't exist.
|
2013-03-11 08:42:36 -04:00
|
|
|
func Untar(archive io.Reader, path string) error {
|
2013-06-22 15:29:42 -04:00
|
|
|
if archive == nil {
|
|
|
|
return fmt.Errorf("Empty archive")
|
|
|
|
}
|
2013-07-08 16:30:03 -04:00
|
|
|
|
|
|
|
buf := make([]byte, 10)
|
|
|
|
totalN := 0
|
|
|
|
for totalN < 10 {
|
|
|
|
if n, err := archive.Read(buf[totalN:]); err != nil {
|
|
|
|
if err == io.EOF {
|
|
|
|
return fmt.Errorf("Tarball too short")
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
} else {
|
|
|
|
totalN += n
|
|
|
|
utils.Debugf("[tar autodetect] n: %d", n)
|
|
|
|
}
|
2013-06-13 20:53:38 -04:00
|
|
|
}
|
|
|
|
compression := DetectCompression(buf)
|
|
|
|
|
|
|
|
utils.Debugf("Archive compression detected: %s", compression.Extension())
|
|
|
|
|
2013-06-22 11:53:10 -04:00
|
|
|
cmd := exec.Command("tar", "--numeric-owner", "-f", "-", "-C", path, "-x"+compression.Flag())
|
2013-07-08 16:30:03 -04:00
|
|
|
cmd.Stdin = io.MultiReader(bytes.NewReader(buf), archive)
|
2013-06-01 01:25:48 -04:00
|
|
|
// Hardcode locale environment for predictable outcome regardless of host configuration.
|
|
|
|
// (see https://github.com/dotcloud/docker/issues/355)
|
|
|
|
cmd.Env = []string{"LANG=en_US.utf-8", "LC_ALL=en_US.utf-8"}
|
2013-03-11 08:42:36 -04:00
|
|
|
output, err := cmd.CombinedOutput()
|
|
|
|
if err != nil {
|
2013-05-28 16:37:49 -04:00
|
|
|
return fmt.Errorf("%s: %s", err, output)
|
2013-03-11 08:42:36 -04:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2013-06-18 23:28:49 -04:00
|
|
|
// TarUntar is a convenience function which calls Tar and Untar, with
|
|
|
|
// the output of one piped into the other. If either Tar or Untar fails,
|
|
|
|
// TarUntar aborts and returns the error.
|
|
|
|
func TarUntar(src string, filter []string, dst string) error {
|
|
|
|
utils.Debugf("TarUntar(%s %s %s)", src, filter, dst)
|
|
|
|
archive, err := TarFilter(src, Uncompressed, filter)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return Untar(archive, dst)
|
|
|
|
}
|
|
|
|
|
2013-06-14 19:43:39 -04:00
|
|
|
// UntarPath is a convenience function which looks for an archive
|
|
|
|
// at filesystem path `src`, and unpacks it at `dst`.
|
|
|
|
func UntarPath(src, dst string) error {
|
|
|
|
if archive, err := os.Open(src); err != nil {
|
|
|
|
return err
|
|
|
|
} else if err := Untar(archive, dst); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// CopyWithTar creates a tar archive of filesystem path `src`, and
|
|
|
|
// unpacks it at filesystem path `dst`.
|
|
|
|
// The archive is streamed directly with fixed buffering and no
|
|
|
|
// intermediary disk IO.
|
|
|
|
//
|
|
|
|
func CopyWithTar(src, dst string) error {
|
2013-06-18 23:28:49 -04:00
|
|
|
srcSt, err := os.Stat(src)
|
2013-06-14 19:43:39 -04:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2013-06-20 23:20:16 -04:00
|
|
|
if !srcSt.IsDir() {
|
|
|
|
return CopyFileWithTar(src, dst)
|
|
|
|
}
|
|
|
|
// Create dst, copy src's content into it
|
|
|
|
utils.Debugf("Creating dest directory: %s", dst)
|
|
|
|
if err := os.MkdirAll(dst, 0700); err != nil && !os.IsExist(err) {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
utils.Debugf("Calling TarUntar(%s, %s)", src, dst)
|
|
|
|
return TarUntar(src, nil, dst)
|
|
|
|
}
|
|
|
|
|
|
|
|
// CopyFileWithTar emulates the behavior of the 'cp' command-line
|
|
|
|
// for a single file. It copies a regular file from path `src` to
|
|
|
|
// path `dst`, and preserves all its metadata.
|
|
|
|
//
|
|
|
|
// If `dst` ends with a trailing slash '/', the final destination path
|
|
|
|
// will be `dst/base(src)`.
|
|
|
|
func CopyFileWithTar(src, dst string) error {
|
|
|
|
utils.Debugf("CopyFileWithTar(%s, %s)", src, dst)
|
|
|
|
srcSt, err := os.Stat(src)
|
2013-06-18 23:28:49 -04:00
|
|
|
if err != nil {
|
2013-06-20 23:20:16 -04:00
|
|
|
return err
|
2013-06-18 23:28:49 -04:00
|
|
|
}
|
|
|
|
if srcSt.IsDir() {
|
2013-06-20 23:20:16 -04:00
|
|
|
return fmt.Errorf("Can't copy a directory")
|
2013-06-18 23:28:49 -04:00
|
|
|
}
|
2013-06-20 23:20:16 -04:00
|
|
|
// Clean up the trailing /
|
|
|
|
if dst[len(dst)-1] == '/' {
|
|
|
|
dst = path.Join(dst, filepath.Base(src))
|
2013-06-18 23:28:49 -04:00
|
|
|
}
|
2013-06-20 23:20:16 -04:00
|
|
|
// Create the holding directory if necessary
|
|
|
|
if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
buf := new(bytes.Buffer)
|
|
|
|
tw := tar.NewWriter(buf)
|
|
|
|
hdr, err := tar.FileInfoHeader(srcSt, "")
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
hdr.Name = filepath.Base(dst)
|
|
|
|
if err := tw.WriteHeader(hdr); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
srcF, err := os.Open(src)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if _, err := io.Copy(tw, srcF); err != nil {
|
|
|
|
return err
|
2013-06-18 23:28:49 -04:00
|
|
|
}
|
2013-06-20 23:20:16 -04:00
|
|
|
tw.Close()
|
|
|
|
return Untar(buf, filepath.Dir(dst))
|
2013-06-14 19:43:39 -04:00
|
|
|
}
|
|
|
|
|
2013-03-29 16:18:59 -04:00
|
|
|
// CmdStream executes a command, and returns its stdout as a stream.
|
|
|
|
// If the command fails to run or doesn't complete successfully, an error
|
|
|
|
// will be returned, including anything written on stderr.
|
2013-03-11 08:42:36 -04:00
|
|
|
func CmdStream(cmd *exec.Cmd) (io.Reader, error) {
|
|
|
|
stdout, err := cmd.StdoutPipe()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
stderr, err := cmd.StderrPipe()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
pipeR, pipeW := io.Pipe()
|
2013-03-29 07:42:17 -04:00
|
|
|
errChan := make(chan []byte)
|
2013-03-29 16:26:02 -04:00
|
|
|
// Collect stderr, we will use it in case of an error
|
2013-03-11 08:42:36 -04:00
|
|
|
go func() {
|
|
|
|
errText, e := ioutil.ReadAll(stderr)
|
|
|
|
if e != nil {
|
|
|
|
errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
|
|
|
|
}
|
2013-03-29 07:42:17 -04:00
|
|
|
errChan <- errText
|
|
|
|
}()
|
2013-03-29 16:26:02 -04:00
|
|
|
// Copy stdout to the returned pipe
|
2013-03-29 07:42:17 -04:00
|
|
|
go func() {
|
|
|
|
_, err := io.Copy(pipeW, stdout)
|
|
|
|
if err != nil {
|
|
|
|
pipeW.CloseWithError(err)
|
|
|
|
}
|
|
|
|
errText := <-errChan
|
2013-03-11 08:42:36 -04:00
|
|
|
if err := cmd.Wait(); err != nil {
|
2013-07-02 17:47:58 -04:00
|
|
|
pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
|
2013-03-11 08:42:36 -04:00
|
|
|
} else {
|
|
|
|
pipeW.Close()
|
|
|
|
}
|
|
|
|
}()
|
2013-03-29 16:26:02 -04:00
|
|
|
// Run the command and return the pipe
|
2013-03-11 08:42:36 -04:00
|
|
|
if err := cmd.Start(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return pipeR, nil
|
|
|
|
}
|
2013-04-21 17:23:55 -04:00
|
|
|
|
|
|
|
// NewTempArchive reads the content of src into a temporary file, and returns the contents
|
|
|
|
// of that file as an archive. The archive can only be read once - as soon as reading completes,
|
|
|
|
// the file will be deleted.
|
|
|
|
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
|
|
|
|
f, err := ioutil.TempFile(dir, "")
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if _, err := io.Copy(f, src); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if _, err := f.Seek(0, 0); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
st, err := f.Stat()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
size := st.Size()
|
|
|
|
return &TempArchive{f, size}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// TempArchive is an archive backed by an open temporary file on disk.
// Reading is delegated to the embedded *os.File; the Read method removes
// the backing file once any read returns an error (including io.EOF), so
// the archive can only be consumed once.
type TempArchive struct {
	*os.File
	Size int64 // Pre-computed from Stat().Size() as a convenience
}
|
|
|
|
|
|
|
|
func (archive *TempArchive) Read(data []byte) (int, error) {
|
|
|
|
n, err := archive.File.Read(data)
|
|
|
|
if err != nil {
|
|
|
|
os.Remove(archive.File.Name())
|
|
|
|
}
|
|
|
|
return n, err
|
|
|
|
}
|