Merge pull request #15492 from vbatts/update-tar-split

vendor: update tar-split to v0.9.6
Jessie Frazelle · 2015-08-21 14:45:19 -07:00 · commit e5e6eaacee
8 changed files with 56 additions and 54 deletions


@@ -36,7 +36,7 @@ clone git github.com/hashicorp/consul v0.5.2

 # get graph and distribution packages
 clone git github.com/docker/distribution 7dc8d4a26b689bd4892f2f2322dbce0b7119d686
-clone git github.com/vbatts/tar-split v0.9.4
+clone git github.com/vbatts/tar-split v0.9.6
 clone git github.com/docker/notary 8e8122eb5528f621afcd4e2854c47302f17392f7
 clone git github.com/endophage/gotuf a592b03b28b02bb29bb5878308fb1abed63383b5


@@ -159,17 +159,19 @@ func (tr *Reader) Next() (*Header, error) {
 		if err != nil {
 			return nil, err
 		}
-		var b []byte
+		var buf []byte
 		if tr.RawAccounting {
 			if _, err = tr.rawBytes.Write(realname); err != nil {
 				return nil, err
 			}
-			b = tr.RawBytes()
+			buf = make([]byte, tr.rawBytes.Len())
+			copy(buf[:], tr.RawBytes())
 		}
 		hdr, err := tr.Next()
 		// since the above call to Next() resets the buffer, we need to throw the bytes over
 		if tr.RawAccounting {
-			if _, err = tr.rawBytes.Write(b); err != nil {
+			buf = append(buf, tr.RawBytes()...)
+			if _, err = tr.rawBytes.Write(buf); err != nil {
 				return nil, err
 			}
 		}
@@ -181,17 +181,19 @@ func (tr *Reader) Next() (*Header, error) {
 		if err != nil {
 			return nil, err
 		}
-		var b []byte
+		var buf []byte
 		if tr.RawAccounting {
 			if _, err = tr.rawBytes.Write(realname); err != nil {
 				return nil, err
 			}
-			b = tr.RawBytes()
+			buf = make([]byte, tr.rawBytes.Len())
+			copy(buf[:], tr.RawBytes())
 		}
 		hdr, err := tr.Next()
 		// since the above call to Next() resets the buffer, we need to throw the bytes over
 		if tr.RawAccounting {
-			if _, err = tr.rawBytes.Write(b); err != nil {
+			buf = append(buf, tr.RawBytes()...)
+			if _, err = tr.rawBytes.Write(buf); err != nil {
 				return nil, err
 			}
 		}
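Note: both hunks above make the same fix, for the GNU long-name and long-link cases respectively. tr.RawBytes() returns a slice that aliases the reader's internal bytes.Buffer, and the recursive tr.Next() call resets that buffer, so the saved header bytes could be overwritten before they were written back. The new code copies them into a freshly allocated slice first, then appends whatever the inner Next() accumulated. A minimal standalone sketch of the aliasing hazard (illustrative names, not the vendored API):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var raw bytes.Buffer
	raw.WriteString("longname header bytes")

	// Hazard: b shares backing storage with raw's internal slice.
	b := raw.Bytes()

	// A later Reset+Write (what the recursive Next() effectively does)
	// reuses that storage, silently corrupting b.
	raw.Reset()
	raw.WriteString("NEXT ENTRY")
	fmt.Printf("aliased slice: %q\n", b[:10]) // likely prints "NEXT ENTRY"

	// The fix mirrored in the diff: copy into a fresh slice up front.
	raw.Reset()
	raw.WriteString("longname header bytes")
	safe := make([]byte, raw.Len())
	copy(safe, raw.Bytes())
	raw.Reset()
	raw.WriteString("NEXT ENTRY")
	fmt.Printf("copied slice:  %q\n", safe[:10]) // still "longname h"
}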


@@ -9,7 +9,7 @@ import (
 	"github.com/vbatts/tar-split/tar/storage"
 )

-// NewOutputTarStream returns an io.ReadCloser that is an assemble tar archive
+// NewOutputTarStream returns an io.ReadCloser that is an assembled tar archive
 // stream.
 //
 // It takes a storage.FileGetter, for mapping the file payloads that are to be read in,
@@ -62,7 +62,6 @@ func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadClose
 				fh.Close()
 			}
 		}
-		pw.Close()
 	}()
 	return pr
 }
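The dropped pw.Close() sat after the goroutine's loop, which appears to exit only through return paths that already call pw.CloseWithError, so the trailing call looks like unreachable dead code. For context, a sketch of how the assembly side is typically driven, assuming the v0.9.6 API surface (storage.NewPathFileGetter and storage.NewJSONUnpacker are assumed to exist in the package; both file paths are placeholders):

package main

import (
	"io"
	"os"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	// Metadata previously produced by the disassembly side (placeholder path).
	mf, err := os.Open("tar-data.json")
	if err != nil {
		panic(err)
	}
	defer mf.Close()

	// A FileGetter maps entry names back to their payloads; here the
	// payloads are assumed to live under ./extracted.
	fg := storage.NewPathFileGetter("./extracted")

	// Reassemble the original archive stream and write it out.
	rc := asm.NewOutputTarStream(fg, storage.NewJSONUnpacker(mf))
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		panic(err)
	}
}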


@@ -22,8 +22,8 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io
 	// What to do here... folks will want their own access to the Reader that is
 	// their tar archive stream, but we'll need that same stream to use our
 	// forked 'archive/tar'.
-	// Perhaps do an io.TeeReader that hand back an io.Reader for them to read
-	// from, and we'll mitm the stream to store metadata.
+	// Perhaps do an io.TeeReader that hands back an io.Reader for them to read
+	// from, and we'll MITM the stream to store metadata.
 	// We'll need a storage.FilePutter too ...

 	// Another concern, whether to do any storage.FilePutter operations, such that we
@@ -32,7 +32,7 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io
 	// Perhaps we have a DiscardFilePutter that is a bit bucket.

 	// we'll return the pipe reader, since TeeReader does not buffer and will
-	// only read what the outputRdr Read's. Since Tar archive's have padding on
+	// only read what the outputRdr Read's. Since Tar archives have padding on
 	// the end, we want to be the one reading the padding, even if the user's
 	// `archive/tar` doesn't care.
 	pR, pW := io.Pipe()
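The TeeReader/MITM idea from the comment, in isolation: io.TeeReader forwards to a writer every byte the consumer reads, and only those bytes, which is exactly why the code insists on owning the read of the trailing padding itself. A generic sketch of the pattern (not the library's code):

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("payload bytes flowing through")

	// Every byte the consumer reads from r is also written to h,
	// observing the stream in the middle without buffering it.
	h := sha256.New()
	r := io.TeeReader(src, h)

	n, err := io.Copy(io.Discard, r) // the consumer reads as usual
	if err != nil {
		panic(err)
	}
	fmt.Printf("consumed %d bytes, sha256=%x\n", n, h.Sum(nil))
}

Note that TeeReader never reads ahead: if the consumer stops early, the observer sees nothing past that point.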
@@ -55,13 +55,15 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io
 				}
 				// even when an EOF is reached, there is often 1024 null bytes on
 				// the end of an archive. Collect them too.
-				_, err := p.AddEntry(storage.Entry{
-					Type:    storage.SegmentType,
-					Payload: tr.RawBytes(),
-				})
-				if err != nil {
-					pW.CloseWithError(err)
-					return
+				if b := tr.RawBytes(); len(b) > 0 {
+					_, err := p.AddEntry(storage.Entry{
+						Type:    storage.SegmentType,
+						Payload: b,
+					})
+					if err != nil {
+						pW.CloseWithError(err)
+						return
+					}
 				}
 				break // not return. We need the end of the reader.
 			}
@@ -69,12 +71,15 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io
 				break // not return. We need the end of the reader.
 			}
-			if _, err := p.AddEntry(storage.Entry{
-				Type:    storage.SegmentType,
-				Payload: tr.RawBytes(),
-			}); err != nil {
-				pW.CloseWithError(err)
-				return
+			if b := tr.RawBytes(); len(b) > 0 {
+				_, err := p.AddEntry(storage.Entry{
+					Type:    storage.SegmentType,
+					Payload: b,
+				})
+				if err != nil {
+					pW.CloseWithError(err)
+					return
+				}
 			}

 			var csum []byte
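Both hunks wrap AddEntry in a len(b) > 0 guard, so an empty raw segment is no longer packed as a zero-payload SegmentType entry. The "1024 null bytes" in the comment is the tar end-of-archive marker: two 512-byte zero blocks. A quick self-contained check with the standard library (error handling elided for brevity):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	tw.WriteHeader(&tar.Header{Name: "a.txt", Mode: 0644, Size: 5})
	tw.Write([]byte("hello"))
	tw.Close()

	// The format ends an archive with two 512-byte zero blocks; these are
	// the trailing null bytes the comment wants collected as a segment.
	tail := buf.Bytes()[buf.Len()-1024:]
	fmt.Println("last 1024 bytes are all zero:", bytes.Equal(tail, make([]byte, 1024)))
}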


@@ -5,7 +5,7 @@ Packing and unpacking the Entries of the stream. The types of streams are
 either segments of raw bytes (for the raw headers and various padding) and for
 an entry marking a file payload.

-The raw bytes are stored precisely in the packed (marshalled) Entry. Where as
+The raw bytes are stored precisely in the packed (marshalled) Entry, whereas
 the file payload marker include the name of the file, size, and crc64 checksum
 (for basic file integrity).
 */


@@ -19,11 +19,11 @@ const (
 	// SegmentType represents a raw bytes segment from the archive stream. These raw
 	// byte segments consist of the raw headers and various padding.
 	//
-	// It's payload is to be marshalled base64 encoded.
+	// Its payload is to be marshalled base64 encoded.
 	SegmentType
 )

-// Entry is a the structure for packing and unpacking the information read from
+// Entry is the structure for packing and unpacking the information read from
 // the Tar archive.
 //
 // FileType Payload checksum is using `hash/crc64` for basic file integrity,
@@ -32,8 +32,8 @@ const (
 // collisions in a sample of 18.2 million, CRC64 had none.
 type Entry struct {
 	Type     Type   `json:"type"`
-	Name     string `json:"name",omitempty`
-	Size     int64  `json:"size",omitempty`
-	Payload  []byte `json:"payload"` // SegmentType store payload here; FileType store crc64 checksum here;
+	Name     string `json:"name,omitempty"`
+	Size     int64  `json:"size,omitempty"`
+	Payload  []byte `json:"payload"` // SegmentType stores payload here; FileType stores crc64 checksum here;
 	Position int    `json:"position"`
 }
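The tag change is more than cosmetic: in `json:"name",omitempty` the option sits outside the quoted value, so encoding/json never sees it and empty fields are serialized anyway. A small demonstration:

package main

import (
	"encoding/json"
	"fmt"
)

// brokenEntry uses the old malformed tag; reflect.StructTag parsing stops at
// the closing quote, so ",omitempty" is silently ignored.
type brokenEntry struct {
	Name string `json:"name",omitempty`
}

// fixedEntry uses the corrected tag with the option inside the value.
type fixedEntry struct {
	Name string `json:"name,omitempty"`
}

func main() {
	b, _ := json.Marshal(brokenEntry{})
	f, _ := json.Marshal(fixedEntry{})
	fmt.Println(string(b)) // {"name":""} -- empty field still emitted
	fmt.Println(string(f)) // {}          -- empty field omitted
}

go vet's structtag check flags exactly this malformed-tag pattern.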


@@ -5,14 +5,13 @@ import (
 	"errors"
 	"hash/crc64"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 )

-// FileGetter is the interface for getting a stream of a file payload, address
-// by name/filename. Presumably, the names will be scoped to relative file
-// paths.
+// FileGetter is the interface for getting a stream of a file payload,
+// addressed by name/filename. Presumably, the names will be scoped to relative
+// file paths.
 type FileGetter interface {
 	// Get returns a stream for the provided file path
 	Get(filename string) (output io.ReadCloser, err error)
@@ -60,15 +59,15 @@ func (bfgp bufferFileGetPutter) Get(name string) (io.ReadCloser, error) {
 }

 func (bfgp *bufferFileGetPutter) Put(name string, r io.Reader) (int64, []byte, error) {
-	c := crc64.New(CRCTable)
-	tRdr := io.TeeReader(r, c)
-	b := bytes.NewBuffer([]byte{})
-	i, err := io.Copy(b, tRdr)
+	crc := crc64.New(CRCTable)
+	buf := bytes.NewBuffer(nil)
+	cw := io.MultiWriter(crc, buf)
+	i, err := io.Copy(cw, r)
 	if err != nil {
 		return 0, nil, err
 	}
-	bfgp.files[name] = b.Bytes()
-	return i, c.Sum(nil), nil
+	bfgp.files[name] = buf.Bytes()
+	return i, crc.Sum(nil), nil
 }

 type readCloserWrapper struct {
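The rewrite replaces a pull-style TeeReader with a push-style MultiWriter: one io.Copy drives both sinks, the checksum and the in-memory buffer, which reads more directly and drops the intermediate reader. The same idiom in isolation (the table and input here are illustrative; the package's own CRCTable serves the same role):

package main

import (
	"bytes"
	"fmt"
	"hash/crc64"
	"io"
	"strings"
)

func main() {
	crc := crc64.New(crc64.MakeTable(crc64.ISO))
	var buf bytes.Buffer

	// One pass over the input, fanned out to both the hash and the buffer.
	n, err := io.Copy(io.MultiWriter(crc, &buf), strings.NewReader("file payload"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("copied %d bytes, crc64=%x, stored=%q\n", n, crc.Sum64(), buf.String())
}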
@@ -77,7 +76,7 @@ type readCloserWrapper struct {
 func (w *readCloserWrapper) Close() error { return nil }

-// NewBufferFileGetPutter is simple in memory FileGetPutter
+// NewBufferFileGetPutter is a simple in-memory FileGetPutter
 //
 // Implication is this is memory intensive...
 // Probably best for testing or light weight cases.
@@ -97,8 +96,7 @@ type bitBucketFilePutter struct {
 func (bbfp *bitBucketFilePutter) Put(name string, r io.Reader) (int64, []byte, error) {
 	c := crc64.New(CRCTable)
-	tRdr := io.TeeReader(r, c)
-	i, err := io.Copy(ioutil.Discard, tRdr)
+	i, err := io.Copy(c, r)
 	return i, c.Sum(nil), err
 }
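The bit-bucket variant gets the same simplification from the other direction: hash.Hash is itself an io.Writer, so io.Copy(c, r) both drains the reader and checksums it in one step, with no TeeReader and no Discard sink. That is also what lets the io/ioutil import be dropped from this file, as in the import hunk above.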


@@ -8,8 +8,8 @@ import (
 	"path/filepath"
 )

-// ErrDuplicatePath is occured when a tar archive has more than one entry for
-// the same file path
+// ErrDuplicatePath occurs when a tar archive has more than one entry for the
+// same file path
 var ErrDuplicatePath = errors.New("duplicates of file paths not supported")

 // Packer describes the methods to pack Entries to a storage destination
@@ -65,7 +65,7 @@ func (jup *jsonUnpacker) Next() (*Entry, error) {
 		if _, ok := jup.seen[cName]; ok {
 			return nil, ErrDuplicatePath
 		}
-		jup.seen[cName] = emptyByte
+		jup.seen[cName] = struct{}{}
 	}

 	return &e, err
@@ -90,11 +90,7 @@ type jsonPacker struct {
 	seen seenNames
 }

-type seenNames map[string]byte
-
-// used in the seenNames map. byte is a uint8, and we'll re-use the same one
-// for minimalism.
-const emptyByte byte = 0
+type seenNames map[string]struct{}

 func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
 	// check early for dup name
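map[string]struct{} is the conventional Go set type: the empty struct occupies no storage, and struct{}{} states "present" without smuggling in a meaningless byte value and sentinel constant. The dedup logic around it, sketched standalone:

package main

import "fmt"

func main() {
	seen := make(map[string]struct{})

	for _, name := range []string{"etc/passwd", "etc/hosts", "etc/passwd"} {
		if _, ok := seen[name]; ok {
			fmt.Println("duplicate path:", name) // the same check AddEntry uses to return ErrDuplicatePath
			continue
		}
		seen[name] = struct{}{}
	}
}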
@@ -103,7 +99,7 @@ func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
 		if _, ok := jp.seen[cName]; ok {
 			return -1, ErrDuplicatePath
 		}
-		jp.seen[cName] = emptyByte
+		jp.seen[cName] = struct{}{}
 	}

 	e.Position = jp.pos
@@ -117,7 +113,7 @@ func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
 	return e.Position, nil
 }

-// NewJSONPacker provides an Packer that writes each Entry (SegmentType and
+// NewJSONPacker provides a Packer that writes each Entry (SegmentType and
 // FileType) as a json document.
 //
 // The Entries are delimited by new line.
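Putting the two halves together, a sketch of a full disassemble/reassemble round trip, assuming the v0.9.6 API as documented in the hunks above (constructor names are taken from the package's doc comments; archive.tar is a placeholder input):

package main

import (
	"bytes"
	"io"
	"os"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	f, err := os.Open("archive.tar") // placeholder input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Disassemble: JSON entries go to meta, file payloads into memory.
	var meta bytes.Buffer
	fgp := storage.NewBufferFileGetPutter()
	r, err := asm.NewInputTarStream(f, storage.NewJSONPacker(&meta), fgp)
	if err != nil {
		panic(err)
	}
	// The returned reader must be drained; the tee only processes what is read.
	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}

	// Reassemble a byte-identical archive from the stored pieces.
	out := asm.NewOutputTarStream(fgp, storage.NewJSONUnpacker(&meta))
	defer out.Close()
	if _, err := io.Copy(os.Stdout, out); err != nil {
		panic(err)
	}
}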