mirror of
https://github.com/moby/moby.git
synced 2022-11-09 12:21:53 -05:00
Merge pull request #3126 from unclejack/remove_vendored_tar
Remove vendored dotcloud/tar
This commit is contained in:
commit
cdc07f7d5c
22 changed files with 0 additions and 2304 deletions
27
vendor/src/github.com/dotcloud/tar/LICENSE
vendored
27
vendor/src/github.com/dotcloud/tar/LICENSE
vendored
|
@ -1,27 +0,0 @@
|
||||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above
|
|
||||||
copyright notice, this list of conditions and the following disclaimer
|
|
||||||
in the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
* Neither the name of Google Inc. nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
3
vendor/src/github.com/dotcloud/tar/README
vendored
3
vendor/src/github.com/dotcloud/tar/README
vendored
|
@ -1,3 +0,0 @@
|
||||||
This is a fork of the upstream Go [archive/tar](http://golang.org/pkg/archive/tar/) package to add PAX header support.
|
|
||||||
|
|
||||||
You can monitor the upstream pull request [here](https://codereview.appspot.com/12561043/).
|
|
299
vendor/src/github.com/dotcloud/tar/common.go
vendored
299
vendor/src/github.com/dotcloud/tar/common.go
vendored
|
@ -1,299 +0,0 @@
|
||||||
// Copyright 2009 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package tar implements access to tar archives.
|
|
||||||
// It aims to cover most of the variations, including those produced
|
|
||||||
// by GNU and BSD tars.
|
|
||||||
//
|
|
||||||
// References:
|
|
||||||
// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
|
|
||||||
// http://www.gnu.org/software/tar/manual/html_node/Standard.html
|
|
||||||
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
|
|
||||||
package tar
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	blockSize = 512 // tar archives are composed of 512-byte blocks

	// Header type flags.
	TypeReg           = '0'    // regular file
	TypeRegA          = '\x00' // regular file (pre-POSIX archives)
	TypeLink          = '1'    // hard link
	TypeSymlink       = '2'    // symbolic link
	TypeChar          = '3'    // character device node
	TypeBlock         = '4'    // block device node
	TypeDir           = '5'    // directory
	TypeFifo          = '6'    // fifo node
	TypeCont          = '7'    // reserved
	TypeXHeader       = 'x'    // extended header
	TypeXGlobalHeader = 'g'    // global extended header
	TypeGNULongName   = 'L'    // Next file has a long name
	TypeGNULongLink   = 'K'    // Next file symlinks to a file w/ a long name
)

// A Header represents a single header in a tar archive.
// Some fields may not be populated.
type Header struct {
	Name       string    // name of header file entry
	Mode       int64     // permission and mode bits
	Uid        int       // user id of owner
	Gid        int       // group id of owner
	Size       int64     // length in bytes
	ModTime    time.Time // modified time
	Typeflag   byte      // type of header entry
	Linkname   string    // target name of link
	Uname      string    // user name of owner
	Gname      string    // group name of owner
	Devmajor   int64     // major number of character or block device
	Devminor   int64     // minor number of character or block device
	AccessTime time.Time // access time
	ChangeTime time.Time // status change time
}

// File name constants from the tar spec.
const (
	fileNameSize       = 100 // Maximum number of bytes in a standard tar name.
	fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
)

// FileInfo returns an os.FileInfo for the Header.
func (h *Header) FileInfo() os.FileInfo {
	return headerFileInfo{h}
}

// headerFileInfo implements os.FileInfo on top of a *Header.
type headerFileInfo struct {
	h *Header
}

func (fi headerFileInfo) Size() int64        { return fi.h.Size }
func (fi headerFileInfo) IsDir() bool        { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
func (fi headerFileInfo) Sys() interface{}   { return fi.h }

// Name returns the base name of the file.
//
// NOTE(review): despite the doc comment above (kept from the original),
// this returns the *cleaned path* for directories, not path.Base — that
// matches the code of this era; confirm before "fixing".
func (fi headerFileInfo) Name() string {
	if fi.IsDir() {
		// Directories are stored with a trailing slash; Clean strips it.
		return path.Clean(fi.h.Name)
	}
	return fi.h.Name
}

// Mode returns the permission and mode bits for the headerFileInfo.
func (fi headerFileInfo) Mode() (mode os.FileMode) {
	// Permission bits.
	mode = os.FileMode(fi.h.Mode).Perm()

	// setuid, setgid and sticky bits.
	if fi.h.Mode&c_ISUID != 0 {
		mode |= os.ModeSetuid
	}
	if fi.h.Mode&c_ISGID != 0 {
		mode |= os.ModeSetgid
	}
	if fi.h.Mode&c_ISVTX != 0 {
		mode |= os.ModeSticky
	}

	// File type encoded in the mode field: clear perm, setuid, setgid
	// and sticky bits, then match against the c_IS* type values. The
	// values are mutually exclusive, so a switch is equivalent to the
	// original chain of equality tests.
	switch m := os.FileMode(fi.h.Mode) &^ 07777; m {
	case c_ISDIR:
		mode |= os.ModeDir
	case c_ISFIFO:
		mode |= os.ModeNamedPipe
	case c_ISLNK:
		mode |= os.ModeSymlink
	case c_ISBLK:
		mode |= os.ModeDevice
	case c_ISCHR:
		mode |= os.ModeDevice | os.ModeCharDevice
	case c_ISSOCK:
		mode |= os.ModeSocket
	}

	// File type encoded in the tar type flag; bits are OR'd in on top
	// of whatever the mode field already contributed.
	switch fi.h.Typeflag {
	case TypeLink, TypeSymlink:
		// hard link, symbolic link
		mode |= os.ModeSymlink
	case TypeChar:
		// character device node
		mode |= os.ModeDevice | os.ModeCharDevice
	case TypeBlock:
		// block device node
		mode |= os.ModeDevice
	case TypeDir:
		// directory
		mode |= os.ModeDir
	case TypeFifo:
		// fifo node
		mode |= os.ModeNamedPipe
	}

	return mode
}

// sysStat, if non-nil, populates h from system-dependent fields of fi.
var sysStat func(fi os.FileInfo, h *Header) error

// Mode constants from the tar spec.
const (
	c_ISUID  = 04000   // Set uid
	c_ISGID  = 02000   // Set gid
	c_ISVTX  = 01000   // Save text (sticky bit)
	c_ISDIR  = 040000  // Directory
	c_ISFIFO = 010000  // FIFO
	c_ISREG  = 0100000 // Regular file
	c_ISLNK  = 0120000 // Symbolic link
	c_ISBLK  = 060000  // Block special file
	c_ISCHR  = 020000  // Character special file
	c_ISSOCK = 0140000 // Socket
)
|
|
||||||
|
|
||||||
// Keywords for the PAX Extended Header records.
const (
	paxAtime    = "atime"
	paxCharset  = "charset"
	paxComment  = "comment"
	paxCtime    = "ctime" // please note that ctime is not a valid pax header.
	paxGid      = "gid"
	paxGname    = "gname"
	paxLinkpath = "linkpath"
	paxMtime    = "mtime"
	paxPath     = "path"
	paxSize     = "size"
	paxUid      = "uid"
	paxUname    = "uname"
	paxNone     = "" // sentinel for "no pax keyword"
)
|
|
||||||
|
|
||||||
// FileInfoHeader creates a partially-populated Header from fi.
|
|
||||||
// If fi describes a symlink, FileInfoHeader records link as the link target.
|
|
||||||
// If fi describes a directory, a slash is appended to the name.
|
|
||||||
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
|
|
||||||
if fi == nil {
|
|
||||||
return nil, errors.New("tar: FileInfo is nil")
|
|
||||||
}
|
|
||||||
fm := fi.Mode()
|
|
||||||
h := &Header{
|
|
||||||
Name: fi.Name(),
|
|
||||||
ModTime: fi.ModTime(),
|
|
||||||
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case fm.IsRegular():
|
|
||||||
h.Mode |= c_ISREG
|
|
||||||
h.Typeflag = TypeReg
|
|
||||||
h.Size = fi.Size()
|
|
||||||
case fi.IsDir():
|
|
||||||
h.Typeflag = TypeDir
|
|
||||||
h.Mode |= c_ISDIR
|
|
||||||
h.Name += "/"
|
|
||||||
case fm&os.ModeSymlink != 0:
|
|
||||||
h.Typeflag = TypeSymlink
|
|
||||||
h.Mode |= c_ISLNK
|
|
||||||
h.Linkname = link
|
|
||||||
case fm&os.ModeDevice != 0:
|
|
||||||
if fm&os.ModeCharDevice != 0 {
|
|
||||||
h.Mode |= c_ISCHR
|
|
||||||
h.Typeflag = TypeChar
|
|
||||||
} else {
|
|
||||||
h.Mode |= c_ISBLK
|
|
||||||
h.Typeflag = TypeBlock
|
|
||||||
}
|
|
||||||
case fm&os.ModeNamedPipe != 0:
|
|
||||||
h.Typeflag = TypeFifo
|
|
||||||
h.Mode |= c_ISFIFO
|
|
||||||
case fm&os.ModeSocket != 0:
|
|
||||||
h.Mode |= c_ISSOCK
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
|
|
||||||
}
|
|
||||||
if fm&os.ModeSetuid != 0 {
|
|
||||||
h.Mode |= c_ISUID
|
|
||||||
}
|
|
||||||
if fm&os.ModeSetgid != 0 {
|
|
||||||
h.Mode |= c_ISGID
|
|
||||||
}
|
|
||||||
if fm&os.ModeSticky != 0 {
|
|
||||||
h.Mode |= c_ISVTX
|
|
||||||
}
|
|
||||||
if sysStat != nil {
|
|
||||||
return h, sysStat(fi, h)
|
|
||||||
}
|
|
||||||
return h, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// zeroBlock is an all-NUL block, used to detect the end-of-archive
// marker (two consecutive zero blocks).
var zeroBlock = make([]byte, blockSize)
|
|
||||||
|
|
||||||
// checksum computes the header checksum both ways: POSIX specifies a sum
// of the unsigned byte values, but the Sun tar uses signed byte values.
// We compute and return both so callers can accept either.
func checksum(header []byte) (unsigned int64, signed int64) {
	i := 0
	for i < len(header) {
		if i == 148 {
			// The chksum field (header[148:156]) is special: it is
			// summed as if it contained eight ASCII spaces.
			unsigned += ' ' * 8
			signed += ' ' * 8
			i = 156
			continue
		}
		c := header[i]
		unsigned += int64(c)
		signed += int64(int8(c))
		i++
	}
	return
}
|
|
||||||
|
|
||||||
// slicer wraps a byte slice and doles out consecutive sub-slices of it.
type slicer []byte

// next returns the first n bytes of the slicer and advances past them.
func (sp *slicer) next(n int) []byte {
	b := (*sp)[:n]
	*sp = (*sp)[n:]
	return b
}
|
|
||||||
|
|
||||||
// isASCII reports whether every rune in s is 7-bit ASCII.
func isASCII(s string) bool {
	for _, c := range s {
		if c >= 0x80 {
			return false
		}
	}
	return true
}

// toASCII returns s with every non-ASCII rune dropped. Strings that are
// already pure ASCII are returned unchanged without allocating.
func toASCII(s string) string {
	if isASCII(s) {
		return s
	}
	var b bytes.Buffer
	for _, c := range s {
		if c < 0x80 {
			b.WriteByte(byte(c))
		}
	}
	return b.String()
}
|
|
|
@ -1,79 +0,0 @@
|
||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package tar_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Example round-trips a tar archive entirely in memory: it writes three
// small text files into an archive and then reads them back, printing
// each entry's contents. The trailing "Output:" comment is checked by
// "go test" and must stay exactly as written.
func Example() {
	// Create a buffer to write our archive to.
	buf := new(bytes.Buffer)

	// Create a new tar archive.
	tw := tar.NewWriter(buf)

	// Add some files to the archive.
	var files = []struct {
		Name, Body string
	}{
		{"readme.txt", "This archive contains some text files."},
		{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
		{"todo.txt", "Get animal handling licence."},
	}
	for _, file := range files {
		hdr := &tar.Header{
			Name: file.Name,
			Size: int64(len(file.Body)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			log.Fatalln(err)
		}
		if _, err := tw.Write([]byte(file.Body)); err != nil {
			log.Fatalln(err)
		}
	}
	// Make sure to check the error on Close.
	if err := tw.Close(); err != nil {
		log.Fatalln(err)
	}

	// Open the tar archive for reading.
	r := bytes.NewReader(buf.Bytes())
	tr := tar.NewReader(r)

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			log.Fatalln(err)
		}
		fmt.Printf("Contents of %s:\n", hdr.Name)
		if _, err := io.Copy(os.Stdout, tr); err != nil {
			log.Fatalln(err)
		}
		fmt.Println()
	}

	// Output:
	// Contents of readme.txt:
	// This archive contains some text files.
	// Contents of gopher.txt:
	// Gopher names:
	// George
	// Geoffrey
	// Gonzo
	// Contents of todo.txt:
	// Get animal handling licence.
}
|
|
396
vendor/src/github.com/dotcloud/tar/reader.go
vendored
396
vendor/src/github.com/dotcloud/tar/reader.go
vendored
|
@ -1,396 +0,0 @@
|
||||||
// Copyright 2009 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package tar
|
|
||||||
|
|
||||||
// TODO(dsymonds):
|
|
||||||
// - pax extensions
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
	// ErrHeader is returned when a tar header block is malformed.
	ErrHeader = errors.New("archive/tar: invalid tar header")
)

// maxNanoSecondIntSize is the number of decimal digits in the nanosecond
// part of a PAX time value (".030" is padded/truncated to this width).
const maxNanoSecondIntSize = 9
|
|
||||||
|
|
||||||
// A Reader provides sequential access to the contents of a tar archive.
// A tar archive consists of a sequence of files. The Next method
// advances to the next file in the archive (including the first), and
// then the Reader can be treated as an io.Reader to access the file's
// data.
type Reader struct {
	r   io.Reader
	err error
	nb  int64 // number of unread bytes for current file entry
	pad int64 // amount of padding (ignored) after current file entry
}

// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader {
	return &Reader{r: r}
}
|
|
||||||
|
|
||||||
// Next advances to the next entry in the tar archive.
|
|
||||||
func (tr *Reader) Next() (*Header, error) {
|
|
||||||
var hdr *Header
|
|
||||||
if tr.err == nil {
|
|
||||||
tr.skipUnread()
|
|
||||||
}
|
|
||||||
if tr.err != nil {
|
|
||||||
return hdr, tr.err
|
|
||||||
}
|
|
||||||
hdr = tr.readHeader()
|
|
||||||
if hdr == nil {
|
|
||||||
return hdr, tr.err
|
|
||||||
}
|
|
||||||
// Check for PAX/GNU header.
|
|
||||||
switch hdr.Typeflag {
|
|
||||||
case TypeXHeader:
|
|
||||||
// PAX extended header
|
|
||||||
headers, err := parsePAX(tr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// We actually read the whole file,
|
|
||||||
// but this skips alignment padding
|
|
||||||
tr.skipUnread()
|
|
||||||
hdr = tr.readHeader()
|
|
||||||
mergePAX(hdr, headers)
|
|
||||||
return hdr, nil
|
|
||||||
case TypeGNULongName:
|
|
||||||
// We have a GNU long name header. Its contents are the real file name.
|
|
||||||
realname, err := ioutil.ReadAll(tr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
hdr, err := tr.Next()
|
|
||||||
hdr.Name = cString(realname)
|
|
||||||
return hdr, err
|
|
||||||
case TypeGNULongLink:
|
|
||||||
// We have a GNU long link header.
|
|
||||||
realname, err := ioutil.ReadAll(tr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
hdr, err := tr.Next()
|
|
||||||
hdr.Linkname = cString(realname)
|
|
||||||
return hdr, err
|
|
||||||
}
|
|
||||||
return hdr, tr.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// mergePAX merges well known headers according to PAX standard.
|
|
||||||
// In general headers with the same name as those found
|
|
||||||
// in the header struct overwrite those found in the header
|
|
||||||
// struct with higher precision or longer values. Esp. useful
|
|
||||||
// for name and linkname fields.
|
|
||||||
func mergePAX(hdr *Header, headers map[string]string) error {
|
|
||||||
for k, v := range headers {
|
|
||||||
switch k {
|
|
||||||
case paxPath:
|
|
||||||
hdr.Name = v
|
|
||||||
case paxLinkpath:
|
|
||||||
hdr.Linkname = v
|
|
||||||
case paxGname:
|
|
||||||
hdr.Gname = v
|
|
||||||
case paxUname:
|
|
||||||
hdr.Uname = v
|
|
||||||
case paxUid:
|
|
||||||
uid, err := strconv.ParseInt(v, 10, 0)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
hdr.Uid = int(uid)
|
|
||||||
case paxGid:
|
|
||||||
gid, err := strconv.ParseInt(v, 10, 0)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
hdr.Gid = int(gid)
|
|
||||||
case paxAtime:
|
|
||||||
t, err := parsePAXTime(v)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
hdr.AccessTime = t
|
|
||||||
case paxMtime:
|
|
||||||
t, err := parsePAXTime(v)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
hdr.ModTime = t
|
|
||||||
case paxCtime:
|
|
||||||
t, err := parsePAXTime(v)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
hdr.ChangeTime = t
|
|
||||||
case paxSize:
|
|
||||||
size, err := strconv.ParseInt(v, 10, 0)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
hdr.Size = int64(size)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parsePAXTime takes a string of the form %d.%d as described in
|
|
||||||
// the PAX specification.
|
|
||||||
func parsePAXTime(t string) (time.Time, error) {
|
|
||||||
buf := []byte(t)
|
|
||||||
pos := bytes.IndexByte(buf, '.')
|
|
||||||
var seconds, nanoseconds int64
|
|
||||||
var err error
|
|
||||||
if pos == -1 {
|
|
||||||
seconds, err = strconv.ParseInt(t, 10, 0)
|
|
||||||
if err != nil {
|
|
||||||
return time.Time{}, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
|
|
||||||
if err != nil {
|
|
||||||
return time.Time{}, err
|
|
||||||
}
|
|
||||||
nano_buf := string(buf[pos+1:])
|
|
||||||
// Pad as needed before converting to a decimal.
|
|
||||||
// For example .030 -> .030000000 -> 30000000 nanoseconds
|
|
||||||
if len(nano_buf) < maxNanoSecondIntSize {
|
|
||||||
// Right pad
|
|
||||||
nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
|
|
||||||
} else if len(nano_buf) > maxNanoSecondIntSize {
|
|
||||||
// Right truncate
|
|
||||||
nano_buf = nano_buf[:maxNanoSecondIntSize]
|
|
||||||
}
|
|
||||||
nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
|
|
||||||
if err != nil {
|
|
||||||
return time.Time{}, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ts := time.Unix(seconds, nanoseconds)
|
|
||||||
return ts, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parsePAX parses PAX headers.
|
|
||||||
// If an extended header (type 'x') is invalid, ErrHeader is returned
|
|
||||||
func parsePAX(r io.Reader) (map[string]string, error) {
|
|
||||||
buf, err := ioutil.ReadAll(r)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
headers := make(map[string]string)
|
|
||||||
// Each record is constructed as
|
|
||||||
// "%d %s=%s\n", length, keyword, value
|
|
||||||
for len(buf) > 0 {
|
|
||||||
// or the header was empty to start with.
|
|
||||||
var sp int
|
|
||||||
// The size field ends at the first space.
|
|
||||||
sp = bytes.IndexByte(buf, ' ')
|
|
||||||
if sp == -1 {
|
|
||||||
return nil, ErrHeader
|
|
||||||
}
|
|
||||||
// Parse the first token as a decimal integer.
|
|
||||||
n, err := strconv.ParseInt(string(buf[:sp]), 10, 0)
|
|
||||||
if err != nil {
|
|
||||||
return nil, ErrHeader
|
|
||||||
}
|
|
||||||
// Extract everything between the decimal and the n -1 on the
|
|
||||||
// beginning to to eat the ' ', -1 on the end to skip the newline.
|
|
||||||
var record []byte
|
|
||||||
record, buf = buf[sp+1:n-1], buf[n:]
|
|
||||||
// The first equals is guaranteed to mark the end of the key.
|
|
||||||
// Everything else is value.
|
|
||||||
eq := bytes.IndexByte(record, '=')
|
|
||||||
if eq == -1 {
|
|
||||||
return nil, ErrHeader
|
|
||||||
}
|
|
||||||
key, value := record[:eq], record[eq+1:]
|
|
||||||
headers[string(key)] = string(value)
|
|
||||||
}
|
|
||||||
return headers, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// cString parses bytes as a NUL-terminated C-style string.
// If a NUL byte is not found then the whole slice is returned as a string.
func cString(b []byte) string {
	if i := bytes.IndexByte(b, 0); i >= 0 {
		return string(b[:i])
	}
	return string(b)
}
|
|
||||||
|
|
||||||
// octal parses a tar numeric field, which is either octal ASCII or, when
// the top bit of the first byte is set, GNU base-256 binary. A parse
// failure is recorded in tr.err and 0 is returned.
func (tr *Reader) octal(b []byte) int64 {
	// Check for binary format first.
	if len(b) > 0 && b[0]&0x80 != 0 {
		var x int64
		for i, c := range b {
			if i == 0 {
				c &= 0x7f // ignore signal bit in first byte
			}
			x = x<<8 | int64(c)
		}
		return x
	}

	// Because unused fields are filled with NULs, we need
	// to skip leading NULs. Fields may also be padded with
	// spaces or NULs.
	// So we remove leading and trailing NULs and spaces to
	// be sure.
	b = bytes.Trim(b, " \x00")

	if len(b) == 0 {
		return 0
	}
	x, err := strconv.ParseUint(cString(b), 8, 64)
	if err != nil {
		tr.err = err
	}
	return int64(x)
}
|
|
||||||
|
|
||||||
// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding.
|
|
||||||
func (tr *Reader) skipUnread() {
|
|
||||||
nr := tr.nb + tr.pad // number of bytes to skip
|
|
||||||
tr.nb, tr.pad = 0, 0
|
|
||||||
if sr, ok := tr.r.(io.Seeker); ok {
|
|
||||||
if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tr *Reader) verifyChecksum(header []byte) bool {
|
|
||||||
if tr.err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
given := tr.octal(header[148:156])
|
|
||||||
unsigned, signed := checksum(header)
|
|
||||||
return given == unsigned || given == signed
|
|
||||||
}
|
|
||||||
|
|
||||||
// readHeader reads one 512-byte header block and unpacks it into a
// Header. On end-of-archive or any error it records the error in tr.err
// and returns nil.
func (tr *Reader) readHeader() *Header {
	header := make([]byte, blockSize)
	if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
		return nil
	}

	// Two blocks of zero bytes marks the end of the archive.
	if bytes.Equal(header, zeroBlock[0:blockSize]) {
		if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
			return nil
		}
		if bytes.Equal(header, zeroBlock[0:blockSize]) {
			tr.err = io.EOF
		} else {
			tr.err = ErrHeader // zero block and then non-zero block
		}
		return nil
	}

	if !tr.verifyChecksum(header) {
		tr.err = ErrHeader
		return nil
	}

	// Unpack the fixed v7 portion of the header.
	hdr := new(Header)
	s := slicer(header)

	hdr.Name = cString(s.next(100))
	hdr.Mode = tr.octal(s.next(8))
	hdr.Uid = int(tr.octal(s.next(8)))
	hdr.Gid = int(tr.octal(s.next(8)))
	hdr.Size = tr.octal(s.next(12))
	hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0)
	s.next(8) // chksum — already verified above
	hdr.Typeflag = s.next(1)[0]
	hdr.Linkname = cString(s.next(100))

	// The remainder of the header depends on the value of magic.
	// The original (v7) version of tar had no explicit magic field,
	// so its magic bytes, like the rest of the block, are NULs.
	magic := string(s.next(8)) // contains version field as well.
	var format string
	switch magic {
	case "ustar\x0000": // POSIX tar (1003.1-1988)
		if string(header[508:512]) == "tar\x00" {
			format = "star"
		} else {
			format = "posix"
		}
	// NOTE(review): upstream Go uses "ustar  \x00" (two spaces) for the
	// old GNU magic; this copy shows one space — whitespace may have
	// been lost in transit, confirm against the original file.
	case "ustar \x00": // old GNU tar
		format = "gnu"
	}

	switch format {
	case "posix", "gnu", "star":
		hdr.Uname = cString(s.next(32))
		hdr.Gname = cString(s.next(32))
		devmajor := s.next(8)
		devminor := s.next(8)
		// Device numbers only apply to device-node entries.
		if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
			hdr.Devmajor = tr.octal(devmajor)
			hdr.Devminor = tr.octal(devminor)
		}
		var prefix string
		switch format {
		case "posix", "gnu":
			prefix = cString(s.next(155))
		case "star":
			prefix = cString(s.next(131))
			hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0)
			hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0)
		}
		// A non-empty prefix is prepended to the name field.
		if len(prefix) > 0 {
			hdr.Name = prefix + "/" + hdr.Name
		}
	}

	// Any octal-parse failure above landed in tr.err; normalize it.
	if tr.err != nil {
		tr.err = ErrHeader
		return nil
	}

	// Maximum value of hdr.Size is 64 GB (12 octal digits),
	// so there's no risk of int64 overflowing.
	tr.nb = int64(hdr.Size)
	tr.pad = -tr.nb & (blockSize - 1) // blockSize is a power of two

	return hdr
}
|
|
||||||
|
|
||||||
// Read reads from the current entry in the tar archive.
|
|
||||||
// It returns 0, io.EOF when it reaches the end of that entry,
|
|
||||||
// until Next is called to advance to the next entry.
|
|
||||||
func (tr *Reader) Read(b []byte) (n int, err error) {
|
|
||||||
if tr.nb == 0 {
|
|
||||||
// file consumed
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
if int64(len(b)) > tr.nb {
|
|
||||||
b = b[0:tr.nb]
|
|
||||||
}
|
|
||||||
n, err = tr.r.Read(b)
|
|
||||||
tr.nb -= int64(n)
|
|
||||||
|
|
||||||
if err == io.EOF && tr.nb > 0 {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
tr.err = err
|
|
||||||
return
|
|
||||||
}
|
|
385
vendor/src/github.com/dotcloud/tar/reader_test.go
vendored
385
vendor/src/github.com/dotcloud/tar/reader_test.go
vendored
|
@ -1,385 +0,0 @@
|
||||||
// Copyright 2009 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package tar
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/md5"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// untarTest describes a single read-back fixture: an archive on disk,
// the headers expected from it, and (optionally) MD5 checksums of each
// entry's contents.
type untarTest struct {
	file    string    // path of the fixture archive under testdata/
	headers []*Header // expected headers, in archive order
	cksums  []string  // expected MD5 sums of each entry's contents
}
|
|
||||||
|
|
||||||
// gnuTarTest is the GNU-format fixture, kept as its own variable so
// other tests can reference it directly as well as via untarTests.
var gnuTarTest = &untarTest{
	file: "testdata/gnu.tar",
	headers: []*Header{
		{
			Name:     "small.txt",
			Mode:     0640,
			Uid:      73025,
			Gid:      5000,
			Size:     5,
			ModTime:  time.Unix(1244428340, 0),
			Typeflag: '0',
			Uname:    "dsymonds",
			Gname:    "eng",
		},
		{
			Name:     "small2.txt",
			Mode:     0640,
			Uid:      73025,
			Gid:      5000,
			Size:     11,
			ModTime:  time.Unix(1244436044, 0),
			Typeflag: '0',
			Uname:    "dsymonds",
			Gname:    "eng",
		},
	},
	cksums: []string{
		"e38b27eaccb4391bdec553a7f3ae6b2f",
		"c65bd2e50a56a2138bf1716f2fd56fe9",
	},
}

// untarTests covers one fixture per archive flavor: GNU, star, v7, PAX,
// and a ustar archive whose uid/gid fields are empty (issue 5290).
var untarTests = []*untarTest{
	gnuTarTest,
	{
		file: "testdata/star.tar",
		headers: []*Header{
			{
				Name:       "small.txt",
				Mode:       0640,
				Uid:        73025,
				Gid:        5000,
				Size:       5,
				ModTime:    time.Unix(1244592783, 0),
				Typeflag:   '0',
				Uname:      "dsymonds",
				Gname:      "eng",
				AccessTime: time.Unix(1244592783, 0),
				ChangeTime: time.Unix(1244592783, 0),
			},
			{
				Name:       "small2.txt",
				Mode:       0640,
				Uid:        73025,
				Gid:        5000,
				Size:       11,
				ModTime:    time.Unix(1244592783, 0),
				Typeflag:   '0',
				Uname:      "dsymonds",
				Gname:      "eng",
				AccessTime: time.Unix(1244592783, 0),
				ChangeTime: time.Unix(1244592783, 0),
			},
		},
	},
	{
		file: "testdata/v7.tar",
		headers: []*Header{
			{
				Name:     "small.txt",
				Mode:     0444,
				Uid:      73025,
				Gid:      5000,
				Size:     5,
				ModTime:  time.Unix(1244593104, 0),
				Typeflag: '\x00',
			},
			{
				Name:     "small2.txt",
				Mode:     0444,
				Uid:      73025,
				Gid:      5000,
				Size:     11,
				ModTime:  time.Unix(1244593104, 0),
				Typeflag: '\x00',
			},
		},
	},
	{
		file: "testdata/pax.tar",
		headers: []*Header{
			{
				Name:       "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
				Mode:       0664,
				Uid:        1000,
				Gid:        1000,
				Uname:      "shane",
				Gname:      "shane",
				Size:       7,
				ModTime:    time.Unix(1350244992, 23960108),
				ChangeTime: time.Unix(1350244992, 23960108),
				AccessTime: time.Unix(1350244992, 23960108),
				Typeflag:   TypeReg,
			},
			{
				Name:       "a/b",
				Mode:       0777,
				Uid:        1000,
				Gid:        1000,
				Uname:      "shane",
				Gname:      "shane",
				Size:       0,
				ModTime:    time.Unix(1350266320, 910238425),
				ChangeTime: time.Unix(1350266320, 910238425),
				AccessTime: time.Unix(1350266320, 910238425),
				Typeflag:   TypeSymlink,
				Linkname:   "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
			},
		},
	},
	{
		file: "testdata/nil-uid.tar", // golang.org/issue/5290
		headers: []*Header{
			{
				Name:     "P1050238.JPG.log",
				Mode:     0664,
				Uid:      0,
				Gid:      0,
				Size:     14,
				ModTime:  time.Unix(1365454838, 0),
				Typeflag: TypeReg,
				Linkname: "",
				Uname:    "eyefi",
				Gname:    "eyefi",
				Devmajor: 0,
				Devminor: 0,
			},
		},
	},
}
|
|
||||||
|
|
||||||
func TestReader(t *testing.T) {
|
|
||||||
testLoop:
|
|
||||||
for i, test := range untarTests {
|
|
||||||
f, err := os.Open(test.file)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("test %d: Unexpected error: %v", i, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
tr := NewReader(f)
|
|
||||||
for j, header := range test.headers {
|
|
||||||
hdr, err := tr.Next()
|
|
||||||
if err != nil || hdr == nil {
|
|
||||||
t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
|
|
||||||
f.Close()
|
|
||||||
continue testLoop
|
|
||||||
}
|
|
||||||
if *hdr != *header {
|
|
||||||
t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
|
|
||||||
i, j, *hdr, *header)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
hdr, err := tr.Next()
|
|
||||||
if err == io.EOF {
|
|
||||||
continue testLoop
|
|
||||||
}
|
|
||||||
if hdr != nil || err != nil {
|
|
||||||
t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPartialRead(t *testing.T) {
|
|
||||||
f, err := os.Open("testdata/gnu.tar")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
tr := NewReader(f)
|
|
||||||
|
|
||||||
// Read the first four bytes; Next() should skip the last byte.
|
|
||||||
hdr, err := tr.Next()
|
|
||||||
if err != nil || hdr == nil {
|
|
||||||
t.Fatalf("Didn't get first file: %v", err)
|
|
||||||
}
|
|
||||||
buf := make([]byte, 4)
|
|
||||||
if _, err := io.ReadFull(tr, buf); err != nil {
|
|
||||||
t.Fatalf("Unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
if expected := []byte("Kilt"); !bytes.Equal(buf, expected) {
|
|
||||||
t.Errorf("Contents = %v, want %v", buf, expected)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Second file
|
|
||||||
hdr, err = tr.Next()
|
|
||||||
if err != nil || hdr == nil {
|
|
||||||
t.Fatalf("Didn't get second file: %v", err)
|
|
||||||
}
|
|
||||||
buf = make([]byte, 6)
|
|
||||||
if _, err := io.ReadFull(tr, buf); err != nil {
|
|
||||||
t.Fatalf("Unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
if expected := []byte("Google"); !bytes.Equal(buf, expected) {
|
|
||||||
t.Errorf("Contents = %v, want %v", buf, expected)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestIncrementalRead(t *testing.T) {
|
|
||||||
test := gnuTarTest
|
|
||||||
f, err := os.Open(test.file)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
tr := NewReader(f)
|
|
||||||
|
|
||||||
headers := test.headers
|
|
||||||
cksums := test.cksums
|
|
||||||
nread := 0
|
|
||||||
|
|
||||||
// loop over all files
|
|
||||||
for ; ; nread++ {
|
|
||||||
hdr, err := tr.Next()
|
|
||||||
if hdr == nil || err == io.EOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// check the header
|
|
||||||
if *hdr != *headers[nread] {
|
|
||||||
t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
|
|
||||||
*hdr, headers[nread])
|
|
||||||
}
|
|
||||||
|
|
||||||
// read file contents in little chunks EOF,
|
|
||||||
// checksumming all the way
|
|
||||||
h := md5.New()
|
|
||||||
rdbuf := make([]uint8, 8)
|
|
||||||
for {
|
|
||||||
nr, err := tr.Read(rdbuf)
|
|
||||||
if err == io.EOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Read: unexpected error %v\n", err)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
h.Write(rdbuf[0:nr])
|
|
||||||
}
|
|
||||||
// verify checksum
|
|
||||||
have := fmt.Sprintf("%x", h.Sum(nil))
|
|
||||||
want := cksums[nread]
|
|
||||||
if want != have {
|
|
||||||
t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if nread != len(headers) {
|
|
||||||
t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNonSeekable(t *testing.T) {
|
|
||||||
test := gnuTarTest
|
|
||||||
f, err := os.Open(test.file)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
type readerOnly struct {
|
|
||||||
io.Reader
|
|
||||||
}
|
|
||||||
tr := NewReader(readerOnly{f})
|
|
||||||
nread := 0
|
|
||||||
|
|
||||||
for ; ; nread++ {
|
|
||||||
_, err := tr.Next()
|
|
||||||
if err == io.EOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if nread != len(test.headers) {
|
|
||||||
t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(test.headers), nread)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParsePAXHeader(t *testing.T) {
|
|
||||||
paxTests := [][3]string{
|
|
||||||
{"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths
|
|
||||||
{"a", "a=name", "9 a=name\n"}, // Test case involving multiple acceptable length
|
|
||||||
{"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}}
|
|
||||||
for _, test := range paxTests {
|
|
||||||
key, expected, raw := test[0], test[1], test[2]
|
|
||||||
reader := bytes.NewBuffer([]byte(raw))
|
|
||||||
headers, err := parsePAX(reader)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Couldn't parse correctly formatted headers: %v", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if strings.EqualFold(headers[key], expected) {
|
|
||||||
t.Errorf("mtime header incorrectly parsed: got %s, wanted %s", headers[key], expected)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
trailer := make([]byte, 100)
|
|
||||||
n, err := reader.Read(trailer)
|
|
||||||
if err != io.EOF || n != 0 {
|
|
||||||
t.Error("Buffer wasn't consumed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
badHeader := bytes.NewBuffer([]byte("3 somelongkey="))
|
|
||||||
if _, err := parsePAX(badHeader); err != ErrHeader {
|
|
||||||
t.Fatal("Unexpected success when parsing bad header")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParsePAXTime(t *testing.T) {
|
|
||||||
// Some valid PAX time values
|
|
||||||
timestamps := map[string]time.Time{
|
|
||||||
"1350244992.023960108": time.Unix(1350244992, 23960108), // The commoon case
|
|
||||||
"1350244992.02396010": time.Unix(1350244992, 23960100), // Lower precision value
|
|
||||||
"1350244992.0239601089": time.Unix(1350244992, 23960108), // Higher precision value
|
|
||||||
"1350244992": time.Unix(1350244992, 0), // Low precision value
|
|
||||||
}
|
|
||||||
for input, expected := range timestamps {
|
|
||||||
ts, err := parsePAXTime(input)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if !ts.Equal(expected) {
|
|
||||||
t.Fatalf("Time parsing failure %s %s", ts, expected)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMergePAX(t *testing.T) {
|
|
||||||
hdr := new(Header)
|
|
||||||
// Test a string, integer, and time based value.
|
|
||||||
headers := map[string]string{
|
|
||||||
"path": "a/b/c",
|
|
||||||
"uid": "1000",
|
|
||||||
"mtime": "1350244992.023960108",
|
|
||||||
}
|
|
||||||
err := mergePAX(hdr, headers)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
want := &Header{
|
|
||||||
Name: "a/b/c",
|
|
||||||
Uid: 1000,
|
|
||||||
ModTime: time.Unix(1350244992, 23960108),
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(hdr, want) {
|
|
||||||
t.Errorf("incorrect merge: got %+v, want %+v", hdr, want)
|
|
||||||
}
|
|
||||||
}
|
|
20
vendor/src/github.com/dotcloud/tar/stat_atim.go
vendored
20
vendor/src/github.com/dotcloud/tar/stat_atim.go
vendored
|
@ -1,20 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build linux openbsd
|
|
||||||
|
|
||||||
package tar
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func statAtime(st *syscall.Stat_t) time.Time {
|
|
||||||
return time.Unix(st.Atim.Unix())
|
|
||||||
}
|
|
||||||
|
|
||||||
func statCtime(st *syscall.Stat_t) time.Time {
|
|
||||||
return time.Unix(st.Ctim.Unix())
|
|
||||||
}
|
|
|
@ -1,20 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build darwin freebsd netbsd
|
|
||||||
|
|
||||||
package tar
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func statAtime(st *syscall.Stat_t) time.Time {
|
|
||||||
return time.Unix(st.Atimespec.Unix())
|
|
||||||
}
|
|
||||||
|
|
||||||
func statCtime(st *syscall.Stat_t) time.Time {
|
|
||||||
return time.Unix(st.Ctimespec.Unix())
|
|
||||||
}
|
|
32
vendor/src/github.com/dotcloud/tar/stat_unix.go
vendored
32
vendor/src/github.com/dotcloud/tar/stat_unix.go
vendored
|
@ -1,32 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build linux darwin freebsd openbsd netbsd
|
|
||||||
|
|
||||||
package tar
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
sysStat = statUnix
|
|
||||||
}
|
|
||||||
|
|
||||||
func statUnix(fi os.FileInfo, h *Header) error {
|
|
||||||
sys, ok := fi.Sys().(*syscall.Stat_t)
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
h.Uid = int(sys.Uid)
|
|
||||||
h.Gid = int(sys.Gid)
|
|
||||||
// TODO(bradfitz): populate username & group. os/user
|
|
||||||
// doesn't cache LookupId lookups, and lacks group
|
|
||||||
// lookup functions.
|
|
||||||
h.AccessTime = statAtime(sys)
|
|
||||||
h.ChangeTime = statCtime(sys)
|
|
||||||
// TODO(bradfitz): major/minor device numbers?
|
|
||||||
return nil
|
|
||||||
}
|
|
271
vendor/src/github.com/dotcloud/tar/tar_test.go
vendored
271
vendor/src/github.com/dotcloud/tar/tar_test.go
vendored
|
@ -1,271 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package tar
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"reflect"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestFileInfoHeader(t *testing.T) {
|
|
||||||
fi, err := os.Stat("testdata/small.txt")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
h, err := FileInfoHeader(fi, "")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("FileInfoHeader: %v", err)
|
|
||||||
}
|
|
||||||
if g, e := h.Name, "small.txt"; g != e {
|
|
||||||
t.Errorf("Name = %q; want %q", g, e)
|
|
||||||
}
|
|
||||||
if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
|
|
||||||
t.Errorf("Mode = %#o; want %#o", g, e)
|
|
||||||
}
|
|
||||||
if g, e := h.Size, int64(5); g != e {
|
|
||||||
t.Errorf("Size = %v; want %v", g, e)
|
|
||||||
}
|
|
||||||
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
|
|
||||||
t.Errorf("ModTime = %v; want %v", g, e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFileInfoHeaderDir(t *testing.T) {
|
|
||||||
fi, err := os.Stat("testdata")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
h, err := FileInfoHeader(fi, "")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("FileInfoHeader: %v", err)
|
|
||||||
}
|
|
||||||
if g, e := h.Name, "testdata/"; g != e {
|
|
||||||
t.Errorf("Name = %q; want %q", g, e)
|
|
||||||
}
|
|
||||||
// Ignoring c_ISGID for golang.org/issue/4867
|
|
||||||
if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
|
|
||||||
t.Errorf("Mode = %#o; want %#o", g, e)
|
|
||||||
}
|
|
||||||
if g, e := h.Size, int64(0); g != e {
|
|
||||||
t.Errorf("Size = %v; want %v", g, e)
|
|
||||||
}
|
|
||||||
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
|
|
||||||
t.Errorf("ModTime = %v; want %v", g, e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFileInfoHeaderSymlink(t *testing.T) {
|
|
||||||
h, err := FileInfoHeader(symlink{}, "some-target")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if g, e := h.Name, "some-symlink"; g != e {
|
|
||||||
t.Errorf("Name = %q; want %q", g, e)
|
|
||||||
}
|
|
||||||
if g, e := h.Linkname, "some-target"; g != e {
|
|
||||||
t.Errorf("Linkname = %q; want %q", g, e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type symlink struct{}
|
|
||||||
|
|
||||||
func (symlink) Name() string { return "some-symlink" }
|
|
||||||
func (symlink) Size() int64 { return 0 }
|
|
||||||
func (symlink) Mode() os.FileMode { return os.ModeSymlink }
|
|
||||||
func (symlink) ModTime() time.Time { return time.Time{} }
|
|
||||||
func (symlink) IsDir() bool { return false }
|
|
||||||
func (symlink) Sys() interface{} { return nil }
|
|
||||||
|
|
||||||
func TestRoundTrip(t *testing.T) {
|
|
||||||
data := []byte("some file contents")
|
|
||||||
|
|
||||||
var b bytes.Buffer
|
|
||||||
tw := NewWriter(&b)
|
|
||||||
hdr := &Header{
|
|
||||||
Name: "file.txt",
|
|
||||||
Uid: 1 << 21, // too big for 8 octal digits
|
|
||||||
Size: int64(len(data)),
|
|
||||||
ModTime: time.Now(),
|
|
||||||
}
|
|
||||||
// tar only supports second precision.
|
|
||||||
hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
|
|
||||||
if err := tw.WriteHeader(hdr); err != nil {
|
|
||||||
t.Fatalf("tw.WriteHeader: %v", err)
|
|
||||||
}
|
|
||||||
if _, err := tw.Write(data); err != nil {
|
|
||||||
t.Fatalf("tw.Write: %v", err)
|
|
||||||
}
|
|
||||||
if err := tw.Close(); err != nil {
|
|
||||||
t.Fatalf("tw.Close: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read it back.
|
|
||||||
tr := NewReader(&b)
|
|
||||||
rHdr, err := tr.Next()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("tr.Next: %v", err)
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(rHdr, hdr) {
|
|
||||||
t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
|
|
||||||
}
|
|
||||||
rData, err := ioutil.ReadAll(tr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Read: %v", err)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(rData, data) {
|
|
||||||
t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type headerRoundTripTest struct {
|
|
||||||
h *Header
|
|
||||||
fm os.FileMode
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHeaderRoundTrip(t *testing.T) {
|
|
||||||
golden := []headerRoundTripTest{
|
|
||||||
// regular file.
|
|
||||||
{
|
|
||||||
h: &Header{
|
|
||||||
Name: "test.txt",
|
|
||||||
Mode: 0644 | c_ISREG,
|
|
||||||
Size: 12,
|
|
||||||
ModTime: time.Unix(1360600916, 0),
|
|
||||||
Typeflag: TypeReg,
|
|
||||||
},
|
|
||||||
fm: 0644,
|
|
||||||
},
|
|
||||||
// hard link.
|
|
||||||
{
|
|
||||||
h: &Header{
|
|
||||||
Name: "hard.txt",
|
|
||||||
Mode: 0644 | c_ISLNK,
|
|
||||||
Size: 0,
|
|
||||||
ModTime: time.Unix(1360600916, 0),
|
|
||||||
Typeflag: TypeLink,
|
|
||||||
},
|
|
||||||
fm: 0644 | os.ModeSymlink,
|
|
||||||
},
|
|
||||||
// symbolic link.
|
|
||||||
{
|
|
||||||
h: &Header{
|
|
||||||
Name: "link.txt",
|
|
||||||
Mode: 0777 | c_ISLNK,
|
|
||||||
Size: 0,
|
|
||||||
ModTime: time.Unix(1360600852, 0),
|
|
||||||
Typeflag: TypeSymlink,
|
|
||||||
},
|
|
||||||
fm: 0777 | os.ModeSymlink,
|
|
||||||
},
|
|
||||||
// character device node.
|
|
||||||
{
|
|
||||||
h: &Header{
|
|
||||||
Name: "dev/null",
|
|
||||||
Mode: 0666 | c_ISCHR,
|
|
||||||
Size: 0,
|
|
||||||
ModTime: time.Unix(1360578951, 0),
|
|
||||||
Typeflag: TypeChar,
|
|
||||||
},
|
|
||||||
fm: 0666 | os.ModeDevice | os.ModeCharDevice,
|
|
||||||
},
|
|
||||||
// block device node.
|
|
||||||
{
|
|
||||||
h: &Header{
|
|
||||||
Name: "dev/sda",
|
|
||||||
Mode: 0660 | c_ISBLK,
|
|
||||||
Size: 0,
|
|
||||||
ModTime: time.Unix(1360578954, 0),
|
|
||||||
Typeflag: TypeBlock,
|
|
||||||
},
|
|
||||||
fm: 0660 | os.ModeDevice,
|
|
||||||
},
|
|
||||||
// directory.
|
|
||||||
{
|
|
||||||
h: &Header{
|
|
||||||
Name: "dir/",
|
|
||||||
Mode: 0755 | c_ISDIR,
|
|
||||||
Size: 0,
|
|
||||||
ModTime: time.Unix(1360601116, 0),
|
|
||||||
Typeflag: TypeDir,
|
|
||||||
},
|
|
||||||
fm: 0755 | os.ModeDir,
|
|
||||||
},
|
|
||||||
// fifo node.
|
|
||||||
{
|
|
||||||
h: &Header{
|
|
||||||
Name: "dev/initctl",
|
|
||||||
Mode: 0600 | c_ISFIFO,
|
|
||||||
Size: 0,
|
|
||||||
ModTime: time.Unix(1360578949, 0),
|
|
||||||
Typeflag: TypeFifo,
|
|
||||||
},
|
|
||||||
fm: 0600 | os.ModeNamedPipe,
|
|
||||||
},
|
|
||||||
// setuid.
|
|
||||||
{
|
|
||||||
h: &Header{
|
|
||||||
Name: "bin/su",
|
|
||||||
Mode: 0755 | c_ISREG | c_ISUID,
|
|
||||||
Size: 23232,
|
|
||||||
ModTime: time.Unix(1355405093, 0),
|
|
||||||
Typeflag: TypeReg,
|
|
||||||
},
|
|
||||||
fm: 0755 | os.ModeSetuid,
|
|
||||||
},
|
|
||||||
// setguid.
|
|
||||||
{
|
|
||||||
h: &Header{
|
|
||||||
Name: "group.txt",
|
|
||||||
Mode: 0750 | c_ISREG | c_ISGID,
|
|
||||||
Size: 0,
|
|
||||||
ModTime: time.Unix(1360602346, 0),
|
|
||||||
Typeflag: TypeReg,
|
|
||||||
},
|
|
||||||
fm: 0750 | os.ModeSetgid,
|
|
||||||
},
|
|
||||||
// sticky.
|
|
||||||
{
|
|
||||||
h: &Header{
|
|
||||||
Name: "sticky.txt",
|
|
||||||
Mode: 0600 | c_ISREG | c_ISVTX,
|
|
||||||
Size: 7,
|
|
||||||
ModTime: time.Unix(1360602540, 0),
|
|
||||||
Typeflag: TypeReg,
|
|
||||||
},
|
|
||||||
fm: 0600 | os.ModeSticky,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, g := range golden {
|
|
||||||
fi := g.h.FileInfo()
|
|
||||||
h2, err := FileInfoHeader(fi, "")
|
|
||||||
if err != nil {
|
|
||||||
t.Error(err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if got, want := h2.Name, g.h.Name; got != want {
|
|
||||||
t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
|
|
||||||
}
|
|
||||||
if got, want := h2.Size, g.h.Size; got != want {
|
|
||||||
t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
|
|
||||||
}
|
|
||||||
if got, want := h2.Mode, g.h.Mode; got != want {
|
|
||||||
t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
|
|
||||||
}
|
|
||||||
if got, want := fi.Mode(), g.fm; got != want {
|
|
||||||
t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
|
|
||||||
}
|
|
||||||
if got, want := h2.ModTime, g.h.ModTime; got != want {
|
|
||||||
t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
|
|
||||||
}
|
|
||||||
if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
|
|
||||||
t.Errorf("i=%d: Sys didn't return original *Header", i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
BIN
vendor/src/github.com/dotcloud/tar/testdata/gnu.tar
vendored
BIN
vendor/src/github.com/dotcloud/tar/testdata/gnu.tar
vendored
Binary file not shown.
Binary file not shown.
BIN
vendor/src/github.com/dotcloud/tar/testdata/pax.tar
vendored
BIN
vendor/src/github.com/dotcloud/tar/testdata/pax.tar
vendored
Binary file not shown.
|
@ -1 +0,0 @@
|
||||||
Kilts
|
|
|
@ -1 +0,0 @@
|
||||||
Google.com
|
|
BIN
vendor/src/github.com/dotcloud/tar/testdata/star.tar
vendored
BIN
vendor/src/github.com/dotcloud/tar/testdata/star.tar
vendored
Binary file not shown.
Binary file not shown.
BIN
vendor/src/github.com/dotcloud/tar/testdata/v7.tar
vendored
BIN
vendor/src/github.com/dotcloud/tar/testdata/v7.tar
vendored
Binary file not shown.
Binary file not shown.
Binary file not shown.
377
vendor/src/github.com/dotcloud/tar/writer.go
vendored
377
vendor/src/github.com/dotcloud/tar/writer.go
vendored
|
@ -1,377 +0,0 @@
|
||||||
// Copyright 2009 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package tar
|
|
||||||
|
|
||||||
// TODO(dsymonds):
|
|
||||||
// - catch more errors (no first header, etc.)
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrWriteTooLong = errors.New("archive/tar: write too long")
|
|
||||||
ErrFieldTooLong = errors.New("archive/tar: header field too long")
|
|
||||||
ErrWriteAfterClose = errors.New("archive/tar: write after close")
|
|
||||||
errNameTooLong = errors.New("archive/tar: name too long")
|
|
||||||
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
|
|
||||||
// A tar archive consists of a sequence of files.
|
|
||||||
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
|
|
||||||
// writing at most hdr.Size bytes in total.
|
|
||||||
type Writer struct {
|
|
||||||
w io.Writer
|
|
||||||
err error
|
|
||||||
nb int64 // number of unwritten bytes for current file entry
|
|
||||||
pad int64 // amount of padding to write after current file entry
|
|
||||||
closed bool
|
|
||||||
usedBinary bool // whether the binary numeric field extension was used
|
|
||||||
preferPax bool // use pax header instead of binary numeric header
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriter creates a new Writer writing to w.
|
|
||||||
func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
|
|
||||||
|
|
||||||
// Flush finishes writing the current file (optional).
|
|
||||||
func (tw *Writer) Flush() error {
|
|
||||||
if tw.nb > 0 {
|
|
||||||
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
|
|
||||||
return tw.err
|
|
||||||
}
|
|
||||||
|
|
||||||
n := tw.nb + tw.pad
|
|
||||||
for n > 0 && tw.err == nil {
|
|
||||||
nr := n
|
|
||||||
if nr > blockSize {
|
|
||||||
nr = blockSize
|
|
||||||
}
|
|
||||||
var nw int
|
|
||||||
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
|
|
||||||
n -= int64(nw)
|
|
||||||
}
|
|
||||||
tw.nb = 0
|
|
||||||
tw.pad = 0
|
|
||||||
return tw.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write s into b, terminating it with a NUL if there is room.
|
|
||||||
// If the value is too long for the field and allowPax is true add a paxheader record instead
|
|
||||||
func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
|
|
||||||
needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s)
|
|
||||||
if needsPaxHeader {
|
|
||||||
paxHeaders[paxKeyword] = s
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if len(s) > len(b) {
|
|
||||||
if tw.err == nil {
|
|
||||||
tw.err = ErrFieldTooLong
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ascii := toASCII(s)
|
|
||||||
copy(b, ascii)
|
|
||||||
if len(ascii) < len(b) {
|
|
||||||
b[len(ascii)] = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode x as an octal ASCII string and write it into b with leading zeros.
|
|
||||||
func (tw *Writer) octal(b []byte, x int64) {
|
|
||||||
s := strconv.FormatInt(x, 8)
|
|
||||||
// leading zeros, but leave room for a NUL.
|
|
||||||
for len(s)+1 < len(b) {
|
|
||||||
s = "0" + s
|
|
||||||
}
|
|
||||||
tw.cString(b, s, false, paxNone, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write x into b, either as octal or as binary (GNUtar/star extension).
|
|
||||||
// If the value is too long for the field and writingPax is enabled both for the field and the add a paxheader record instead
|
|
||||||
func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
|
|
||||||
// Try octal first.
|
|
||||||
s := strconv.FormatInt(x, 8)
|
|
||||||
if len(s) < len(b) {
|
|
||||||
tw.octal(b, x)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// If it is too long for octal, and pax is preferred, use a pax header
|
|
||||||
if allowPax && tw.preferPax {
|
|
||||||
tw.octal(b, 0)
|
|
||||||
s := strconv.FormatInt(x, 10)
|
|
||||||
paxHeaders[paxKeyword] = s
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Too big: use binary (big-endian).
|
|
||||||
tw.usedBinary = true
|
|
||||||
for i := len(b) - 1; x > 0 && i >= 0; i-- {
|
|
||||||
b[i] = byte(x)
|
|
||||||
x >>= 8
|
|
||||||
}
|
|
||||||
b[0] |= 0x80 // highest bit indicates binary format
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
minTime = time.Unix(0, 0)
|
|
||||||
// There is room for 11 octal digits (33 bits) of mtime.
|
|
||||||
maxTime = minTime.Add((1<<33 - 1) * time.Second)
|
|
||||||
)
|
|
||||||
|
|
||||||
// WriteHeader writes hdr and prepares to accept the file's contents.
|
|
||||||
// WriteHeader calls Flush if it is not the first header.
|
|
||||||
// Calling after a Close will return ErrWriteAfterClose.
|
|
||||||
func (tw *Writer) WriteHeader(hdr *Header) error {
|
|
||||||
return tw.writeHeader(hdr, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteHeader writes hdr and prepares to accept the file's contents.
|
|
||||||
// WriteHeader calls Flush if it is not the first header.
|
|
||||||
// Calling after a Close will return ErrWriteAfterClose.
|
|
||||||
// As this method is called internally by writePax header to allow it to
|
|
||||||
// suppress writing the pax header.
|
|
||||||
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
|
|
||||||
if tw.closed {
|
|
||||||
return ErrWriteAfterClose
|
|
||||||
}
|
|
||||||
if tw.err == nil {
|
|
||||||
tw.Flush()
|
|
||||||
}
|
|
||||||
if tw.err != nil {
|
|
||||||
return tw.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// a map to hold pax header records, if any are needed
|
|
||||||
paxHeaders := make(map[string]string)
|
|
||||||
|
|
||||||
// TODO(shanemhansen): we might want to use PAX headers for
|
|
||||||
// subsecond time resolution, but for now let's just capture
|
|
||||||
// too long fields or non ascii characters
|
|
||||||
|
|
||||||
header := make([]byte, blockSize)
|
|
||||||
s := slicer(header)
|
|
||||||
|
|
||||||
// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
|
|
||||||
pathHeaderBytes := s.next(fileNameSize)
|
|
||||||
|
|
||||||
tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders)
|
|
||||||
|
|
||||||
// Handle out of range ModTime carefully.
|
|
||||||
var modTime int64
|
|
||||||
if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
|
|
||||||
modTime = hdr.ModTime.Unix()
|
|
||||||
}
|
|
||||||
|
|
||||||
tw.octal(s.next(8), hdr.Mode) // 100:108
|
|
||||||
tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116
|
|
||||||
tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124
|
|
||||||
tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders) // 124:136
|
|
||||||
tw.numeric(s.next(12), modTime, false, paxNone, nil) // 136:148 --- consider using pax for finer granularity
|
|
||||||
s.next(8) // chksum (148:156)
|
|
||||||
s.next(1)[0] = hdr.Typeflag // 156:157
|
|
||||||
|
|
||||||
tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders)
|
|
||||||
|
|
||||||
copy(s.next(8), []byte("ustar\x0000")) // 257:265
|
|
||||||
tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297
|
|
||||||
tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329
|
|
||||||
tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil) // 329:337
|
|
||||||
tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil) // 337:345
|
|
||||||
|
|
||||||
// keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
|
|
||||||
prefixHeaderBytes := s.next(155)
|
|
||||||
tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix
|
|
||||||
|
|
||||||
// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
|
|
||||||
if tw.usedBinary {
|
|
||||||
copy(header[257:265], []byte("ustar \x00"))
|
|
||||||
}
|
|
||||||
|
|
||||||
_, paxPathUsed := paxHeaders[paxPath]
|
|
||||||
// try to use a ustar header when only the name is too long
|
|
||||||
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
|
|
||||||
suffix := hdr.Name
|
|
||||||
prefix := ""
|
|
||||||
if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
|
|
||||||
var err error
|
|
||||||
prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
|
|
||||||
if err == nil {
|
|
||||||
// ok we can use a ustar long name instead of pax, now correct the fields
|
|
||||||
|
|
||||||
// remove the path field from the pax header. this will suppress the pax header
|
|
||||||
delete(paxHeaders, paxPath)
|
|
||||||
|
|
||||||
// update the path fields
|
|
||||||
tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
|
|
||||||
tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
|
|
||||||
|
|
||||||
// Use the ustar magic if we used ustar long names.
|
|
||||||
if len(prefix) > 0 {
|
|
||||||
copy(header[257:265], []byte("ustar\000"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The chksum field is terminated by a NUL and a space.
|
|
||||||
// This is different from the other octal fields.
|
|
||||||
chksum, _ := checksum(header)
|
|
||||||
tw.octal(header[148:155], chksum)
|
|
||||||
header[155] = ' '
|
|
||||||
|
|
||||||
if tw.err != nil {
|
|
||||||
// problem with header; probably integer too big for a field.
|
|
||||||
return tw.err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(paxHeaders) > 0 {
|
|
||||||
if !allowPax {
|
|
||||||
return errInvalidHeader
|
|
||||||
}
|
|
||||||
if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
tw.nb = int64(hdr.Size)
|
|
||||||
tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
|
|
||||||
|
|
||||||
_, tw.err = tw.w.Write(header)
|
|
||||||
return tw.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeUSTARLongName splits a USTAR long name hdr.Name.
|
|
||||||
// name must be < 256 characters. errNameTooLong is returned
|
|
||||||
// if hdr.Name can't be split. The splitting heuristic
|
|
||||||
// is compatible with gnu tar.
|
|
||||||
func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) {
|
|
||||||
length := len(name)
|
|
||||||
if length > fileNamePrefixSize+1 {
|
|
||||||
length = fileNamePrefixSize + 1
|
|
||||||
} else if name[length-1] == '/' {
|
|
||||||
length--
|
|
||||||
}
|
|
||||||
i := strings.LastIndex(name[:length], "/")
|
|
||||||
// nlen contains the resulting length in the name field.
|
|
||||||
// plen contains the resulting length in the prefix field.
|
|
||||||
nlen := len(name) - i - 1
|
|
||||||
plen := i
|
|
||||||
if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
|
|
||||||
err = errNameTooLong
|
|
||||||
return
|
|
||||||
}
|
|
||||||
prefix, suffix = name[:i], name[i+1:]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// writePaxHeader writes an extended pax header to the
|
|
||||||
// archive.
|
|
||||||
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
|
|
||||||
// Prepare extended header
|
|
||||||
ext := new(Header)
|
|
||||||
ext.Typeflag = TypeXHeader
|
|
||||||
// Setting ModTime is required for reader parsing to
|
|
||||||
// succeed, and seems harmless enough.
|
|
||||||
ext.ModTime = hdr.ModTime
|
|
||||||
// The spec asks that we namespace our pseudo files
|
|
||||||
// with the current pid.
|
|
||||||
pid := os.Getpid()
|
|
||||||
dir, file := path.Split(hdr.Name)
|
|
||||||
fullName := path.Join(dir,
|
|
||||||
fmt.Sprintf("PaxHeaders.%d", pid), file)
|
|
||||||
|
|
||||||
ascii := toASCII(fullName)
|
|
||||||
if len(ascii) > 100 {
|
|
||||||
ascii = ascii[:100]
|
|
||||||
}
|
|
||||||
ext.Name = ascii
|
|
||||||
// Construct the body
|
|
||||||
var buf bytes.Buffer
|
|
||||||
|
|
||||||
for k, v := range paxHeaders {
|
|
||||||
fmt.Fprint(&buf, paxHeader(k+"="+v))
|
|
||||||
}
|
|
||||||
|
|
||||||
ext.Size = int64(len(buf.Bytes()))
|
|
||||||
if err := tw.writeHeader(ext, false); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err := tw.Write(buf.Bytes()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := tw.Flush(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// paxHeader formats a single pax record, prefixing it with the appropriate length.
// The length field is self-referential: it counts every byte of the
// record, including its own decimal digits, the separating space, and
// the trailing newline.
func paxHeader(msg string) string {
	const overhead = 2 // one separating space plus one trailing newline

	// First guess: the total assuming the digit count of the size
	// computed without the length field itself.
	total := len(msg) + overhead
	total += len(strconv.Itoa(total))
	rec := fmt.Sprintf("%d %s\n", total, msg)

	// Including the length field may have pushed the total into the
	// next digit count; one recomputation settles it.
	if len(rec) != total {
		total = len(rec)
		rec = fmt.Sprintf("%d %s\n", total, msg)
	}
	return rec
}
|
|
||||||
|
|
||||||
// Write writes to the current entry in the tar archive.
|
|
||||||
// Write returns the error ErrWriteTooLong if more than
|
|
||||||
// hdr.Size bytes are written after WriteHeader.
|
|
||||||
func (tw *Writer) Write(b []byte) (n int, err error) {
|
|
||||||
if tw.closed {
|
|
||||||
err = ErrWriteTooLong
|
|
||||||
return
|
|
||||||
}
|
|
||||||
overwrite := false
|
|
||||||
if int64(len(b)) > tw.nb {
|
|
||||||
b = b[0:tw.nb]
|
|
||||||
overwrite = true
|
|
||||||
}
|
|
||||||
n, err = tw.w.Write(b)
|
|
||||||
tw.nb -= int64(n)
|
|
||||||
if err == nil && overwrite {
|
|
||||||
err = ErrWriteTooLong
|
|
||||||
return
|
|
||||||
}
|
|
||||||
tw.err = err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the tar archive, flushing any unwritten
|
|
||||||
// data to the underlying writer.
|
|
||||||
func (tw *Writer) Close() error {
|
|
||||||
if tw.err != nil || tw.closed {
|
|
||||||
return tw.err
|
|
||||||
}
|
|
||||||
tw.Flush()
|
|
||||||
tw.closed = true
|
|
||||||
if tw.err != nil {
|
|
||||||
return tw.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// trailer: two zero blocks
|
|
||||||
for i := 0; i < 2; i++ {
|
|
||||||
_, tw.err = tw.w.Write(zeroBlock)
|
|
||||||
if tw.err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return tw.err
|
|
||||||
}
|
|
393
vendor/src/github.com/dotcloud/tar/writer_test.go
vendored
393
vendor/src/github.com/dotcloud/tar/writer_test.go
vendored
|
@ -1,393 +0,0 @@
|
||||||
// Copyright 2009 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package tar
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"testing/iotest"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// writerTestEntry pairs the header passed to WriteHeader with the
// body bytes written for that entry.
type writerTestEntry struct {
	header   *Header // entry metadata to write
	contents string  // entry body; empty for entries with no data
}
|
|
||||||
|
|
||||||
// writerTest describes one golden-file case: writing entries through
// Writer must reproduce file byte for byte.
type writerTest struct {
	file    string // filename of expected output
	entries []*writerTestEntry
}
|
|
||||||
|
|
||||||
// writerTests is the golden-file table for TestWriter. Each case's
// reference archive under testdata was produced with the tar
// command shown in the per-case comment.
var writerTests = []*writerTest{
	// The writer test file was produced with this command:
	// tar (GNU tar) 1.26
	//   ln -s small.txt link.txt
	//   tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
	{
		file: "testdata/writer.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     "small.txt",
					Mode:     0640,
					Uid:      73025,
					Gid:      5000,
					Size:     5,
					ModTime:  time.Unix(1246508266, 0),
					Typeflag: '0',
					Uname:    "dsymonds",
					Gname:    "eng",
				},
				contents: "Kilts",
			},
			{
				header: &Header{
					Name:     "small2.txt",
					Mode:     0640,
					Uid:      73025,
					Gid:      5000,
					Size:     11,
					ModTime:  time.Unix(1245217492, 0),
					Typeflag: '0',
					Uname:    "dsymonds",
					Gname:    "eng",
				},
				contents: "Google.com\n",
			},
			{
				header: &Header{
					Name:     "link.txt",
					Mode:     0777,
					Uid:      1000,
					Gid:      1000,
					Size:     0,
					ModTime:  time.Unix(1314603082, 0),
					Typeflag: '2',
					Linkname: "small.txt",
					Uname:    "strings",
					Gname:    "strings",
				},
				// no contents
			},
		},
	},
	// The truncated test file was produced using these commands:
	//   dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
	//   tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
	{
		file: "testdata/writer-big.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     "tmp/16gig.txt",
					Mode:     0640,
					Uid:      73025,
					Gid:      5000,
					Size:     16 << 30,
					ModTime:  time.Unix(1254699560, 0),
					Typeflag: '0',
					Uname:    "dsymonds",
					Gname:    "eng",
				},
				// fake contents
				contents: strings.Repeat("\x00", 4<<10),
			},
		},
	},
	// This file was produced using gnu tar 1.17
	//   gnutar -b 4 --format=ustar (longname/)*15 + file.txt
	{
		file: "testdata/ustar.tar",
		entries: []*writerTestEntry{
			{
				header: &Header{
					Name:     strings.Repeat("longname/", 15) + "file.txt",
					Mode:     0644,
					Uid:      0765,
					Gid:      024,
					Size:     06,
					ModTime:  time.Unix(1360135598, 0),
					Typeflag: '0',
					Uname:    "shane",
					Gname:    "staff",
				},
				contents: "hello\n",
			},
		},
	},
}
|
|
||||||
|
|
||||||
// bytestr renders a byte array as a hexadecimal dump line, spaced for
// easy visual inspection: a 4-hex-digit offset prefix, then each byte
// as either its ASCII character (alphanumerics only) or two hex digits.
// (The unused `const rowLen = 32` from the original has been removed;
// row splitting is the caller's job — see bytediff.)
func bytestr(offset int, b []byte) string {
	s := fmt.Sprintf("%04x ", offset)
	for _, ch := range b {
		switch {
		case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
			s += fmt.Sprintf(" %c", ch)
		default:
			s += fmt.Sprintf(" %02x", ch)
		}
	}
	return s
}
|
|
||||||
|
|
||||||
// Render a pseudo-diff between two blocks of bytes.
|
|
||||||
func bytediff(a []byte, b []byte) string {
|
|
||||||
const rowLen = 32
|
|
||||||
s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
|
|
||||||
for offset := 0; len(a)+len(b) > 0; offset += rowLen {
|
|
||||||
na, nb := rowLen, rowLen
|
|
||||||
if na > len(a) {
|
|
||||||
na = len(a)
|
|
||||||
}
|
|
||||||
if nb > len(b) {
|
|
||||||
nb = len(b)
|
|
||||||
}
|
|
||||||
sa := bytestr(offset, a[0:na])
|
|
||||||
sb := bytestr(offset, b[0:nb])
|
|
||||||
if sa != sb {
|
|
||||||
s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
|
|
||||||
}
|
|
||||||
a = a[na:]
|
|
||||||
b = b[nb:]
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestWriter replays each writerTests case through a Writer and
// compares the produced archive byte-for-byte against the golden
// file recorded in testdata.
func TestWriter(t *testing.T) {
testLoop:
	for i, test := range writerTests {
		expected, err := ioutil.ReadFile(test.file)
		if err != nil {
			t.Errorf("test %d: Unexpected error: %v", i, err)
			continue
		}

		// The golden files were produced with `dd ... count=8`-style
		// truncation, so cap our output the same way.
		buf := new(bytes.Buffer)
		tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
		big := false
		for j, entry := range test.entries {
			// Entries larger than 1 KB overflow the truncated writer,
			// which makes Close fail by design; remember that.
			big = big || entry.header.Size > 1<<10
			if err := tw.WriteHeader(entry.header); err != nil {
				t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
				continue testLoop
			}
			if _, err := io.WriteString(tw, entry.contents); err != nil {
				t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
				continue testLoop
			}
		}
		// Only interested in Close failures for the small tests.
		if err := tw.Close(); err != nil && !big {
			t.Errorf("test %d: Failed closing archive: %v", i, err)
			continue testLoop
		}

		actual := buf.Bytes()
		if !bytes.Equal(expected, actual) {
			t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
				i, bytediff(expected, actual))
		}
		if testing.Short() { // The second test is expensive.
			break
		}
	}
}
|
|
||||||
|
|
||||||
func TestPax(t *testing.T) {
|
|
||||||
// Create an archive with a large name
|
|
||||||
fileinfo, err := os.Stat("testdata/small.txt")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
hdr, err := FileInfoHeader(fileinfo, "")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("os.Stat: %v", err)
|
|
||||||
}
|
|
||||||
// Force a PAX long name to be written
|
|
||||||
longName := strings.Repeat("ab", 100)
|
|
||||||
contents := strings.Repeat(" ", int(hdr.Size))
|
|
||||||
hdr.Name = longName
|
|
||||||
var buf bytes.Buffer
|
|
||||||
writer := NewWriter(&buf)
|
|
||||||
if err := writer.WriteHeader(hdr); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if err := writer.Close(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
// Simple test to make sure PAX extensions are in effect
|
|
||||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
|
|
||||||
t.Fatal("Expected at least one PAX header to be written.")
|
|
||||||
}
|
|
||||||
// Test that we can get a long name back out of the archive.
|
|
||||||
reader := NewReader(&buf)
|
|
||||||
hdr, err = reader.Next()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if hdr.Name != longName {
|
|
||||||
t.Fatal("Couldn't recover long file name")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPaxSymlink(t *testing.T) {
|
|
||||||
// Create an archive with a large linkname
|
|
||||||
fileinfo, err := os.Stat("testdata/small.txt")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
hdr, err := FileInfoHeader(fileinfo, "")
|
|
||||||
hdr.Typeflag = TypeSymlink
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("os.Stat:1 %v", err)
|
|
||||||
}
|
|
||||||
// Force a PAX long linkname to be written
|
|
||||||
longLinkname := strings.Repeat("1234567890/1234567890", 10)
|
|
||||||
hdr.Linkname = longLinkname
|
|
||||||
|
|
||||||
hdr.Size = 0
|
|
||||||
var buf bytes.Buffer
|
|
||||||
writer := NewWriter(&buf)
|
|
||||||
if err := writer.WriteHeader(hdr); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if err := writer.Close(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
// Simple test to make sure PAX extensions are in effect
|
|
||||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
|
|
||||||
t.Fatal("Expected at least one PAX header to be written.")
|
|
||||||
}
|
|
||||||
// Test that we can get a long name back out of the archive.
|
|
||||||
reader := NewReader(&buf)
|
|
||||||
hdr, err = reader.Next()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if hdr.Linkname != longLinkname {
|
|
||||||
t.Fatal("Couldn't recover long link name")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPaxNonAscii(t *testing.T) {
|
|
||||||
// Create an archive with non ascii. These should trigger a pax header
|
|
||||||
// because pax headers have a defined utf-8 encoding.
|
|
||||||
fileinfo, err := os.Stat("testdata/small.txt")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
hdr, err := FileInfoHeader(fileinfo, "")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("os.Stat:1 %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// some sample data
|
|
||||||
chineseFilename := "文件名"
|
|
||||||
chineseGroupname := "組"
|
|
||||||
chineseUsername := "用戶名"
|
|
||||||
|
|
||||||
hdr.Name = chineseFilename
|
|
||||||
hdr.Gname = chineseGroupname
|
|
||||||
hdr.Uname = chineseUsername
|
|
||||||
|
|
||||||
contents := strings.Repeat(" ", int(hdr.Size))
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
writer := NewWriter(&buf)
|
|
||||||
if err := writer.WriteHeader(hdr); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if err := writer.Close(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
// Simple test to make sure PAX extensions are in effect
|
|
||||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
|
|
||||||
t.Fatal("Expected at least one PAX header to be written.")
|
|
||||||
}
|
|
||||||
// Test that we can get a long name back out of the archive.
|
|
||||||
reader := NewReader(&buf)
|
|
||||||
hdr, err = reader.Next()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if hdr.Name != chineseFilename {
|
|
||||||
t.Fatal("Couldn't recover unicode name")
|
|
||||||
}
|
|
||||||
if hdr.Gname != chineseGroupname {
|
|
||||||
t.Fatal("Couldn't recover unicode group")
|
|
||||||
}
|
|
||||||
if hdr.Uname != chineseUsername {
|
|
||||||
t.Fatal("Couldn't recover unicode user")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPAXHeader(t *testing.T) {
|
|
||||||
medName := strings.Repeat("CD", 50)
|
|
||||||
longName := strings.Repeat("AB", 100)
|
|
||||||
paxTests := [][2]string{
|
|
||||||
{paxPath + "=/etc/hosts", "19 path=/etc/hosts\n"},
|
|
||||||
{"a=b", "6 a=b\n"}, // Single digit length
|
|
||||||
{"a=names", "11 a=names\n"}, // Test case involving carries
|
|
||||||
{paxPath + "=" + longName, fmt.Sprintf("210 path=%s\n", longName)},
|
|
||||||
{paxPath + "=" + medName, fmt.Sprintf("110 path=%s\n", medName)}}
|
|
||||||
|
|
||||||
for _, test := range paxTests {
|
|
||||||
key, expected := test[0], test[1]
|
|
||||||
if result := paxHeader(key); result != expected {
|
|
||||||
t.Fatalf("paxHeader: got %s, expected %s", result, expected)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUSTARLongName(t *testing.T) {
|
|
||||||
// Create an archive with a path that failed to split with USTAR extension in previous versions.
|
|
||||||
fileinfo, err := os.Stat("testdata/small.txt")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
hdr, err := FileInfoHeader(fileinfo, "")
|
|
||||||
hdr.Typeflag = TypeDir
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("os.Stat:1 %v", err)
|
|
||||||
}
|
|
||||||
// Force a PAX long name to be written. The name was taken from a practical example
|
|
||||||
// that fails and replaced ever char through numbers to anonymize the sample.
|
|
||||||
longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
|
|
||||||
hdr.Name = longName
|
|
||||||
|
|
||||||
hdr.Size = 0
|
|
||||||
var buf bytes.Buffer
|
|
||||||
writer := NewWriter(&buf)
|
|
||||||
if err := writer.WriteHeader(hdr); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if err := writer.Close(); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
// Test that we can get a long name back out of the archive.
|
|
||||||
reader := NewReader(&buf)
|
|
||||||
hdr, err = reader.Next()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
if hdr.Name != longName {
|
|
||||||
t.Fatal("Couldn't recover long name")
|
|
||||||
}
|
|
||||||
}
|
|
Loading…
Reference in a new issue