
Merge pull request #35424 from cyphar/tar-split-cve_2017_14992

vendor: update to github.com/vbatts/tar-split@v0.10.2
Sebastiaan van Stijn 2017-11-07 23:09:38 +01:00 committed by GitHub
commit bd8ed57506
5 changed files with 81 additions and 17 deletions

View file

@@ -0,0 +1,36 @@
package image

import (
	"archive/tar"
	"bytes"
	"context"
	"io"
	"testing"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/integration/util/request"
	"github.com/docker/docker/internal/testutil"
)

// Ensure we don't regress on CVE-2017-14992.
func TestImportExtremelyLargeImageWorks(t *testing.T) {
	client := request.NewAPIClient(t)

	// Construct an empty tar archive with about 8GB of junk padding at the
	// end. This should not cause any crashes (the padding should be mostly
	// ignored).
	var tarBuffer bytes.Buffer
	tw := tar.NewWriter(&tarBuffer)
	if err := tw.Close(); err != nil {
		t.Fatal(err)
	}
	imageRdr := io.MultiReader(&tarBuffer, io.LimitReader(testutil.DevZero, 8*1024*1024*1024))

	_, err := client.ImageImport(context.Background(),
		types.ImageImportSource{Source: imageRdr, SourceName: "-"},
		"test1234:v42",
		types.ImageImportOptions{})
	if err != nil {
		t.Fatal(err)
	}
}

View file

@@ -1,6 +1,8 @@
package testutil

import (
	"io"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
@@ -11,3 +13,15 @@ func ErrorContains(t require.TestingT, err error, expectedError string, msgAndAr
	require.Error(t, err, msgAndArgs...)
	assert.Contains(t, err.Error(), expectedError, msgAndArgs...)
}

// DevZero acts like /dev/zero but in an OS-independent fashion.
var DevZero io.Reader = devZero{}

type devZero struct{}

func (d devZero) Read(p []byte) (n int, err error) {
	for i := 0; i < len(p); i++ {
		p[i] = '\x00'
	}
	return len(p), nil
}
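
A note on intended use (not part of the diff): because DevZero never returns io.EOF, callers are expected to bound it themselves, which is exactly what the new integration test does with io.LimitReader. A minimal, hypothetical sketch under that assumption; the package name and the 16-byte limit are illustrative only:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

// devZero mirrors the helper above: an endless, OS-independent stream of zero bytes.
type devZero struct{}

func (devZero) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = 0
	}
	return len(p), nil
}

func main() {
	// Bound the endless reader; without io.LimitReader, ReadAll would never return.
	data, err := ioutil.ReadAll(io.LimitReader(devZero{}, 16))
	if err != nil {
		fmt.Println("read error:", err)
		return
	}
	fmt.Printf("read %d zero bytes\n", len(data)) // read 16 zero bytes
}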

View file

@@ -55,7 +55,7 @@ github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7
# get graph and distribution packages
github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
-github.com/vbatts/tar-split v0.10.1
+github.com/vbatts/tar-split v0.10.2
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
# get go-zfs packages

View file

@@ -1,6 +1,7 @@
# tar-split
[![Build Status](https://travis-ci.org/vbatts/tar-split.svg?branch=master)](https://travis-ci.org/vbatts/tar-split)
+[![Go Report Card](https://goreportcard.com/badge/github.com/vbatts/tar-split)](https://goreportcard.com/report/github.com/vbatts/tar-split)
Pristinely disassembling a tar archive, and stashing needed raw bytes and offsets to reassemble a validating original archive.
@@ -50,7 +51,7 @@ For example stored sparse files that have "holes" in them, will be read as a
contiguous file, though the archive contents may be recorded in sparse format.
Therefore when adding the file payload to a reassembled tar, to achieve
identical output, the file payload would need be precisely re-sparsified. This
-is not something I seek to fix imediately, but would rather have an alert that
+is not something I seek to fix immediately, but would rather have an alert that
precise reassembly is not possible.
(see more http://www.gnu.org/software/tar/manual/html_node/Sparse-Formats.html)

View file

@@ -2,7 +2,6 @@ package asm
import (
	"io"
-	"io/ioutil"

	"github.com/vbatts/tar-split/archive/tar"
	"github.com/vbatts/tar-split/tar/storage"
@@ -119,21 +118,35 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io
			}
		}

-		// it is allowable, and not uncommon that there is further padding on the
-		// end of an archive, apart from the expected 1024 null bytes.
-		remainder, err := ioutil.ReadAll(outputRdr)
-		if err != nil && err != io.EOF {
+		// It is allowable, and not uncommon that there is further padding on
+		// the end of an archive, apart from the expected 1024 null bytes. We
+		// do this in chunks rather than in one go to avoid cases where a
+		// maliciously crafted tar file tries to trick us into reading many GBs
+		// into memory.
+		const paddingChunkSize = 1024 * 1024
+		var paddingChunk [paddingChunkSize]byte
+		for {
+			var isEOF bool
+			n, err := outputRdr.Read(paddingChunk[:])
+			if err != nil {
+				if err != io.EOF {
+					pW.CloseWithError(err)
+					return
+				}
+				isEOF = true
+			}
			_, err = p.AddEntry(storage.Entry{
				Type:    storage.SegmentType,
-			Payload: remainder,
+				Payload: paddingChunk[:n],
			})
			if err != nil {
				pW.CloseWithError(err)
				return
			}
+			if isEOF {
+				break
+			}
+		}
		pW.Close()
	}()
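
For readers skimming the vendored change above: the substance of the CVE-2017-14992 fix is replacing an unbounded ioutil.ReadAll of attacker-controlled trailing padding with fixed-size chunked reads. The following standalone sketch illustrates the same bounded-read pattern; the function name, the callback, and the 1 MiB chunk size are illustrative only, not the vendored tar-split API:

package main

import (
	"fmt"
	"io"
	"strings"
)

// consumePadding reads the remainder of r in fixed-size chunks and hands each
// chunk to handle, so memory use stays bounded no matter how much padding an
// untrusted archive appends.
func consumePadding(r io.Reader, handle func([]byte) error) error {
	const chunkSize = 1024 * 1024 // 1 MiB per read, never the whole stream at once
	buf := make([]byte, chunkSize)
	for {
		n, err := r.Read(buf)
		if n > 0 {
			if herr := handle(buf[:n]); herr != nil {
				return herr
			}
		}
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
	}
}

func main() {
	// Simulate trailing padding from an untrusted archive.
	padding := strings.NewReader(strings.Repeat("\x00", 3000))
	total := 0
	if err := consumePadding(padding, func(chunk []byte) error {
		total += len(chunk)
		return nil
	}); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("consumed", total, "bytes of padding in bounded chunks")
}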