Mirror of https://github.com/moby/moby.git
Fix uses of "int" where "int64" should be used instead
Some structures use int for sizes and UNIX timestamps. On some platforms, int is 32 bits, so this can lead to year-2038 issues and to overflows when dealing with large containers or layers. Consistently use int64 to store sizes and UNIX timestamps in api/types/types.go, and update the related code accordingly (i.e. strconv.FormatInt instead of strconv.Itoa). Use int64 in the progressreader package to avoid integer overflow when dealing with large quantities, and update the related code accordingly.

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
Commit: 1f61084d83
Parent: 8f2dca5386
22 changed files with 56 additions and 56 deletions
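For context on the class of bug this commit fixes, here is a minimal, hypothetical Go sketch (not code from this commit): it shows how a 32-bit int silently truncates a large size or a post-2038 timestamp, and how strconv.FormatInt/ParseInt handle int64 values without an intermediate int conversion. The variable names are illustrative only.

// Illustrative sketch, not part of this commit: on a platform where int is
// 32 bits, converting a large int64 size (or a post-2038 Unix timestamp)
// to int wraps around to a wrong value.
package main

import (
	"fmt"
	"strconv"
)

func main() {
	var layerSize int64 = 5 << 30 // 5 GiB, larger than math.MaxInt32

	truncated := int32(layerSize) // stands in for int on a 32-bit platform
	fmt.Println(truncated)        // 1073741824 — wrapped around, wrong
	fmt.Println(layerSize)        // 5368709120 — int64 keeps the full value

	// Format and parse int64 directly instead of going through int:
	s := strconv.FormatInt(layerSize, 10) // rather than strconv.Itoa(int(layerSize))
	parsed, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(s, parsed)
}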
@@ -462,7 +462,7 @@ func getContextFromURL(out io.Writer, remoteURL, dockerfileName string) (absCont
 		In:        response.Body,
 		Out:       out,
 		Formatter: streamformatter.NewStreamFormatter(),
-		Size:      int(response.ContentLength),
+		Size:      response.ContentLength,
 		NewLines:  true,
 		ID:        "",
 		Action:    fmt.Sprintf("Downloading build context from remote url: %s", remoteURL),
@@ -27,7 +27,7 @@ func TestContainerContextID(t *testing.T) {
 		{types.Container{Image: "ubuntu"}, true, "ubuntu", imageHeader, ctx.Image},
 		{types.Container{Image: ""}, true, "<no image>", imageHeader, ctx.Image},
 		{types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, commandHeader, ctx.Command},
-		{types.Container{Created: int(unix)}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt},
+		{types.Container{Created: unix}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt},
 		{types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", portsHeader, ctx.Ports},
 		{types.Container{Status: "RUNNING"}, true, "RUNNING", statusHeader, ctx.Status},
 		{types.Container{SizeRw: 10}, true, "10 B", sizeHeader, ctx.Size},
@@ -70,9 +70,9 @@ type Image struct {
 	ParentId    string
 	RepoTags    []string
 	RepoDigests []string
-	Created     int
-	Size        int
-	VirtualSize int
+	Created     int64
+	Size        int64
+	VirtualSize int64
 	Labels      map[string]string
 }
 
@@ -112,10 +112,10 @@ type Container struct {
 	Names      []string
 	Image      string
 	Command    string
-	Created    int
+	Created    int64
 	Ports      []Port
-	SizeRw     int   `json:",omitempty"`
-	SizeRootFs int   `json:",omitempty"`
+	SizeRw     int64 `json:",omitempty"`
+	SizeRootFs int64 `json:",omitempty"`
 	Labels     map[string]string
 	Status     string
 	HostConfig struct {
@@ -330,7 +330,7 @@ func calcCopyInfo(b *builder, cmdName string, cInfos *[]*copyInfo, origPath stri
 		In:        resp.Body,
 		Out:       b.OutOld,
 		Formatter: b.StreamFormatter,
-		Size:      int(resp.ContentLength),
+		Size:      resp.ContentLength,
 		NewLines:  true,
 		ID:        "",
 		Action:    "Downloading",
@@ -139,7 +139,7 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
 		}
 		defer f.Body.Close()
 		ct := f.Header.Get("Content-Type")
-		clen := int(f.ContentLength)
+		clen := f.ContentLength
 		contentType, bodyReader, err := inspectResponse(ct, f.Body, clen)
 
 		defer bodyReader.Close()
@@ -316,7 +316,7 @@ func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
 // - an io.Reader for the response body
 // - an error value which will be non-nil either when something goes wrong while
 // reading bytes from r or when the detected content-type is not acceptable.
-func inspectResponse(ct string, r io.ReadCloser, clen int) (string, io.ReadCloser, error) {
+func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) {
 	plen := clen
 	if plen <= 0 || plen > maxPreambleLength {
 		plen = maxPreambleLength
@@ -31,7 +31,7 @@ func TestInspectEmptyResponse(t *testing.T) {
 func TestInspectResponseBinary(t *testing.T) {
 	ct := "application/octet-stream"
 	br := ioutil.NopCloser(bytes.NewReader(binaryContext))
-	contentType, bReader, err := inspectResponse(ct, br, len(binaryContext))
+	contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext)))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -56,7 +56,7 @@ func TestResponseUnsupportedContentType(t *testing.T) {
 	content := []byte(textPlainDockerfile)
 	ct := "application/json"
 	br := ioutil.NopCloser(bytes.NewReader(content))
-	contentType, bReader, err := inspectResponse(ct, br, len(textPlainDockerfile))
+	contentType, bReader, err := inspectResponse(ct, br, int64(len(textPlainDockerfile)))
 
 	if err == nil {
 		t.Fatal("Should have returned an error on content-type 'application/json'")
@@ -77,7 +77,7 @@ func TestInspectResponseTextSimple(t *testing.T) {
 	content := []byte(textPlainDockerfile)
 	ct := "text/plain"
 	br := ioutil.NopCloser(bytes.NewReader(content))
-	contentType, bReader, err := inspectResponse(ct, br, len(content))
+	contentType, bReader, err := inspectResponse(ct, br, int64(len(content)))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -96,7 +96,7 @@ func TestInspectResponseTextSimple(t *testing.T) {
 func TestInspectResponseEmptyContentType(t *testing.T) {
 	content := []byte(textPlainDockerfile)
 	br := ioutil.NopCloser(bytes.NewReader(content))
-	contentType, bodyReader, err := inspectResponse("", br, len(content))
+	contentType, bodyReader, err := inspectResponse("", br, int64(len(content)))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -152,7 +152,7 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
 		} else {
 			newC.Command = fmt.Sprintf("%s", container.Path)
 		}
-		newC.Created = int(container.Created.Unix())
+		newC.Created = container.Created.Unix()
 		newC.Status = container.State.String()
 		newC.HostConfig.NetworkMode = string(container.hostConfig.NetworkMode)
 
@@ -185,8 +185,8 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
 
 		if config.Size {
 			sizeRw, sizeRootFs := container.GetSize()
-			newC.SizeRw = int(sizeRw)
-			newC.SizeRootFs = int(sizeRootFs)
+			newC.SizeRw = sizeRw
+			newC.SizeRootFs = sizeRootFs
 		}
 		newC.Labels = container.Config.Labels
 		containers = append(containers, newC)
@@ -190,7 +190,7 @@ func (graph *Graph) Get(name string) (*image.Image, error) {
 		}
 
 		img.Size = size
-		if err := graph.saveSize(graph.imageRoot(id), int(img.Size)); err != nil {
+		if err := graph.saveSize(graph.imageRoot(id), img.Size); err != nil {
 			return nil, err
 		}
 	}
@@ -490,8 +490,8 @@ func (graph *Graph) loadImage(id string) (*image.Image, error) {
 }
 
 // saveSize stores the `size` in the provided graph `img` directory `root`.
-func (graph *Graph) saveSize(root string, size int) error {
-	if err := ioutil.WriteFile(filepath.Join(root, layersizeFileName), []byte(strconv.Itoa(size)), 0600); err != nil {
+func (graph *Graph) saveSize(root string, size int64) error {
+	if err := ioutil.WriteFile(filepath.Join(root, layersizeFileName), []byte(strconv.FormatInt(size, 10)), 0600); err != nil {
 		return fmt.Errorf("Error storing image size in %s/%s: %s", root, layersizeFileName, err)
 	}
 	return nil
@@ -95,7 +95,7 @@ func (graph *Graph) storeImage(img *image.Image, layerData archive.ArchiveReader
 		}
 	}
 
-	if err := graph.saveSize(root, int(img.Size)); err != nil {
+	if err := graph.saveSize(root, img.Size); err != nil {
 		return err
 	}
 
@@ -95,7 +95,7 @@ func (graph *Graph) storeImage(img *image.Image, layerData archive.ArchiveReader
 		}
 	}
 
-	if err := graph.saveSize(root, int(img.Size)); err != nil {
+	if err := graph.saveSize(root, img.Size); err != nil {
 		return err
 	}
 
@@ -45,7 +45,7 @@ func (s *TagStore) Import(src string, repo string, tag string, inConfig io.ReadC
 		In:        resp.Body,
 		Out:       outStream,
 		Formatter: sf,
-		Size:      int(resp.ContentLength),
+		Size:      resp.ContentLength,
 		NewLines:  true,
 		ID:        "",
 		Action:    "Importing",
@@ -99,9 +99,9 @@ func (s *TagStore) Images(filterArgs, filter string, all bool) ([]*types.Image,
 			newImage := new(types.Image)
 			newImage.ParentId = image.Parent
 			newImage.ID = image.ID
-			newImage.Created = int(image.Created.Unix())
-			newImage.Size = int(image.Size)
-			newImage.VirtualSize = int(s.graph.GetParentsSize(image) + image.Size)
+			newImage.Created = image.Created.Unix()
+			newImage.Size = image.Size
+			newImage.VirtualSize = s.graph.GetParentsSize(image) + image.Size
 			newImage.Labels = image.ContainerConfig.Labels
 
 			if utils.DigestReference(ref) {
@@ -136,9 +136,9 @@ func (s *TagStore) Images(filterArgs, filter string, all bool) ([]*types.Image,
 			newImage.RepoTags = []string{"<none>:<none>"}
 			newImage.RepoDigests = []string{"<none>@<none>"}
 			newImage.ID = image.ID
-			newImage.Created = int(image.Created.Unix())
-			newImage.Size = int(image.Size)
-			newImage.VirtualSize = int(s.graph.GetParentsSize(image) + image.Size)
+			newImage.Created = image.Created.Unix()
+			newImage.Size = image.Size
+			newImage.VirtualSize = s.graph.GetParentsSize(image) + image.Size
 			newImage.Labels = image.ContainerConfig.Labels
 
 			images = append(images, newImage)
@@ -256,7 +256,7 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro
 	out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
 	var (
 		imgJSON []byte
-		imgSize int
+		imgSize int64
 		err     error
 		img     *image.Image
 	)
@@ -290,7 +290,7 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro
 			status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
 		}
 		out.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil))
-		layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, int64(imgSize))
+		layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, imgSize)
 		if uerr, ok := err.(*url.Error); ok {
 			err = uerr.Err
 		}
@@ -167,7 +167,7 @@ func (p *v2Puller) download(di *downloadInfo) {
 		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
 		Out:       out,
 		Formatter: p.sf,
-		Size:      int(di.size),
+		Size:      di.size,
 		NewLines:  false,
 		ID:        stringid.TruncateID(di.img.ID),
 		Action:    "Downloading",
@@ -264,7 +264,7 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) {
 			In:        d.tmpFile,
 			Out:       out,
 			Formatter: p.sf,
-			Size:      int(d.size),
+			Size:      d.size,
 			NewLines:  false,
 			ID:        stringid.TruncateID(d.img.ID),
 			Action:    "Extracting",
@@ -289,7 +289,7 @@ func (p *v1Pusher) pushImage(imgID, ep string, token []string) (checksum string,
 		In:        layerData,
 		Out:       p.out,
 		Formatter: p.sf,
-		Size:      int(layerData.Size),
+		Size:      layerData.Size,
 		NewLines:  false,
 		ID:        stringid.TruncateID(imgData.ID),
 		Action:    "Pushing",
@@ -239,7 +239,7 @@ func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (d
 		In:        ioutil.NopCloser(tf),
 		Out:       out,
 		Formatter: p.sf,
-		Size:      int(size),
+		Size:      size,
 		NewLines:  false,
 		ID:        stringid.TruncateID(img.ID),
 		Action:    "Pushing",
@@ -23,8 +23,8 @@ func (e *JSONError) Error() string {
 
 type JSONProgress struct {
 	terminalFd uintptr
-	Current    int   `json:"current,omitempty"`
-	Total      int   `json:"total,omitempty"`
+	Current    int64 `json:"current,omitempty"`
+	Total      int64 `json:"total,omitempty"`
 	Start      int64 `json:"start,omitempty"`
 }
 
@@ -64,7 +64,7 @@ func (p *JSONProgress) String() string {
 	numbersBox = fmt.Sprintf("%8v/%v", current, total)
 
 	if p.Current > 0 && p.Start > 0 && percentage < 50 {
-		fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0))
+		fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0))
 		perEntry := fromStart / time.Duration(p.Current)
 		left := time.Duration(p.Total-p.Current) * perEntry
 		left = (left / time.Second) * time.Second
@@ -14,9 +14,9 @@ type Config struct {
 	In         io.ReadCloser // Stream to read from
 	Out        io.Writer     // Where to send progress bar to
 	Formatter  *streamformatter.StreamFormatter
-	Size       int
-	Current    int
-	LastUpdate int
+	Size       int64
+	Current    int64
+	LastUpdate int64
 	NewLines   bool
 	ID         string
 	Action     string
@@ -29,11 +29,11 @@ func New(newReader Config) *Config {
 
 func (config *Config) Read(p []byte) (n int, err error) {
 	read, err := config.In.Read(p)
-	config.Current += read
-	updateEvery := 1024 * 512 //512kB
+	config.Current += int64(read)
+	updateEvery := int64(1024 * 512) //512kB
 	if config.Size > 0 {
 		// Update progress for every 1% read if 1% < 512kB
-		if increment := int(0.01 * float64(config.Size)); increment < updateEvery {
+		if increment := int64(0.01 * float64(config.Size)); increment < updateEvery {
 			updateEvery = increment
 		}
 	}
@@ -20,7 +20,7 @@ func TestOutputOnPrematureClose(t *testing.T) {
 		In:        reader,
 		Out:       writer,
 		Formatter: streamformatter.NewStreamFormatter(),
-		Size:      len(content),
+		Size:      int64(len(content)),
 		NewLines:  true,
 		ID:        "Test",
 		Action:    "Read",
@@ -60,7 +60,7 @@ func TestCompleteSilently(t *testing.T) {
 		In:        reader,
 		Out:       writer,
 		Formatter: streamformatter.NewStreamFormatter(),
-		Size:      len(content),
+		Size:      int64(len(content)),
 		NewLines:  true,
 		ID:        "Test",
 		Action:    "Read",
@@ -95,17 +95,17 @@ func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]s
 func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
 	return [][2]string{
 		{"name", h.Name},
-		{"mode", strconv.Itoa(int(h.Mode))},
+		{"mode", strconv.FormatInt(h.Mode, 10)},
 		{"uid", strconv.Itoa(h.Uid)},
 		{"gid", strconv.Itoa(h.Gid)},
-		{"size", strconv.Itoa(int(h.Size))},
-		{"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))},
+		{"size", strconv.FormatInt(h.Size, 10)},
+		{"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
 		{"typeflag", string([]byte{h.Typeflag})},
 		{"linkname", h.Linkname},
 		{"uname", h.Uname},
 		{"gname", h.Gname},
-		{"devmajor", strconv.Itoa(int(h.Devmajor))},
-		{"devminor", strconv.Itoa(int(h.Devminor))},
+		{"devmajor", strconv.FormatInt(h.Devmajor, 10)},
+		{"devminor", strconv.FormatInt(h.Devminor, 10)},
 	}
 }
 
@@ -185,7 +185,7 @@ func TestGetRemoteImageJSON(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	assertEqual(t, size, 154, "Expected size 154")
+	assertEqual(t, size, int64(154), "Expected size 154")
 	if len(json) <= 0 {
 		t.Fatal("Expected non-empty json")
 	}
@@ -240,7 +240,7 @@ func (r *Session) LookupRemoteImage(imgID, registry string) error {
 }
 
 // GetRemoteImageJSON retrieves an image's JSON metadata from the registry.
-func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int, error) {
+func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) {
 	res, err := r.client.Get(registry + "images/" + imgID + "/json")
 	if err != nil {
 		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
@@ -250,9 +250,9 @@ func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int, error
 		return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res)
 	}
 	// if the size header is not present, then set it to '-1'
-	imageSize := -1
+	imageSize := int64(-1)
 	if hdr := res.Header.Get("X-Docker-Size"); hdr != "" {
-		imageSize, err = strconv.Atoi(hdr)
+		imageSize, err = strconv.ParseInt(hdr, 10, 64)
 		if err != nil {
 			return nil, -1, err
 		}