
Merge pull request #14921 from aaronlehmann/int64

Fix uses of "int" where "int64" should be used instead
Merged by Arnaud Porterie on 2015-08-04 19:16:13 -07:00 in commit 7374852be9
22 changed files with 56 additions and 56 deletions
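
Background (not part of the original PR text): Go's int is only 32 bits on 386 and arm builds, so funnelling 64-bit quantities such as http.Response.ContentLength, Unix timestamps, or layer sizes through int can silently truncate anything over 2 GiB. A minimal sketch of the failure mode, using an explicit int32 to stand in for int on a 32-bit build:

package main

import (
	"fmt"
	"math"
)

func main() {
	var contentLength int64 = 3 << 30 // a 3 GiB body

	truncated := int32(contentLength) // what int(contentLength) does on a 32-bit build
	fmt.Println(contentLength)        // 3221225472
	fmt.Println(truncated)            // -1073741824: wrapped to a negative "size"
	fmt.Println(math.MaxInt32)        // 2147483647
}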

View file

@ -462,7 +462,7 @@ func getContextFromURL(out io.Writer, remoteURL, dockerfileName string) (absCont
In: response.Body,
Out: out,
Formatter: streamformatter.NewStreamFormatter(),
Size: int(response.ContentLength),
Size: response.ContentLength,
NewLines: true,
ID: "",
Action: fmt.Sprintf("Downloading build context from remote url: %s", remoteURL),
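
A side note on the value being passed here: in net/http, Response.ContentLength is declared as int64 (and is -1 when the length is unknown), so once the progress reader's Size field is int64 the value flows through with no narrowing cast. A small self-contained sketch (the handler and test server are made up for illustration):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("FROM busybox\n")) // 13 bytes, Content-Length set automatically
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()

	var size int64 = resp.ContentLength  // already int64; no int(...) cast needed
	fmt.Println("content length:", size) // 13
}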

View file

@ -27,7 +27,7 @@ func TestContainerContextID(t *testing.T) {
{types.Container{Image: "ubuntu"}, true, "ubuntu", imageHeader, ctx.Image},
{types.Container{Image: ""}, true, "<no image>", imageHeader, ctx.Image},
{types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, commandHeader, ctx.Command},
{types.Container{Created: int(unix)}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt},
{types.Container{Created: unix}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt},
{types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", portsHeader, ctx.Ports},
{types.Container{Status: "RUNNING"}, true, "RUNNING", statusHeader, ctx.Status},
{types.Container{SizeRw: 10}, true, "10 B", sizeHeader, ctx.Size},

View file

@ -70,9 +70,9 @@ type Image struct {
ParentId string
RepoTags []string
RepoDigests []string
Created int
Size int
VirtualSize int
Created int64
Size int64
VirtualSize int64
Labels map[string]string
}
@ -112,10 +112,10 @@ type Container struct {
Names []string
Image string
Command string
Created int
Created int64
Ports []Port
SizeRw int `json:",omitempty"`
SizeRootFs int `json:",omitempty"`
SizeRw int64 `json:",omitempty"`
SizeRootFs int64 `json:",omitempty"`
Labels map[string]string
Status string
HostConfig struct {
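
For illustration (simplified stand-ins, not the real api/types definitions): the widened fields are exactly the ones that either come from time.Time.Unix(), which returns int64, or describe byte counts that can exceed what a 32-bit int holds.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// imageSummary mimics the shape of the Image fields changed above.
type imageSummary struct {
	Created     int64
	Size        int64
	VirtualSize int64
}

func main() {
	img := imageSummary{
		Created:     time.Now().Unix(), // time.Time.Unix() is int64
		Size:        5 << 30,           // a 5 GiB image does not fit in int32
		VirtualSize: 5 << 30,
	}
	out, _ := json.Marshal(img)
	fmt.Println(string(out))
}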

View file

@ -330,7 +330,7 @@ func calcCopyInfo(b *builder, cmdName string, cInfos *[]*copyInfo, origPath stri
In: resp.Body,
Out: b.OutOld,
Formatter: b.StreamFormatter,
Size: int(resp.ContentLength),
Size: resp.ContentLength,
NewLines: true,
ID: "",
Action: "Downloading",

View file

@ -139,7 +139,7 @@ func Build(d *daemon.Daemon, buildConfig *Config) error {
}
defer f.Body.Close()
ct := f.Header.Get("Content-Type")
clen := int(f.ContentLength)
clen := f.ContentLength
contentType, bodyReader, err := inspectResponse(ct, f.Body, clen)
defer bodyReader.Close()
@ -316,7 +316,7 @@ func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
// - an io.Reader for the response body
// - an error value which will be non-nil either when something goes wrong while
// reading bytes from r or when the detected content-type is not acceptable.
func inspectResponse(ct string, r io.ReadCloser, clen int) (string, io.ReadCloser, error) {
func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) {
plen := clen
if plen <= 0 || plen > maxPreambleLength {
plen = maxPreambleLength
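
The clamping shown above works cleanly with an int64 clen because maxPreambleLength is an untyped constant; only the final, already-clamped value would ever need to become an int. A hedged sketch of that pattern (the constant's value here is illustrative, not taken from the source):

package main

import "fmt"

const maxPreambleLength = 100 * 1024 // illustrative limit, untyped constant

func preambleLen(clen int64) int {
	plen := clen
	if plen <= 0 || plen > maxPreambleLength {
		plen = maxPreambleLength
	}
	return int(plen) // safe: plen is now at most maxPreambleLength
}

func main() {
	fmt.Println(preambleLen(-1))      // unknown length -> 102400
	fmt.Println(preambleLen(512))     // small body     -> 512
	fmt.Println(preambleLen(5 << 30)) // huge body      -> 102400
}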

View file

@ -31,7 +31,7 @@ func TestInspectEmptyResponse(t *testing.T) {
func TestInspectResponseBinary(t *testing.T) {
ct := "application/octet-stream"
br := ioutil.NopCloser(bytes.NewReader(binaryContext))
contentType, bReader, err := inspectResponse(ct, br, len(binaryContext))
contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext)))
if err != nil {
t.Fatal(err)
}
@ -56,7 +56,7 @@ func TestResponseUnsupportedContentType(t *testing.T) {
content := []byte(textPlainDockerfile)
ct := "application/json"
br := ioutil.NopCloser(bytes.NewReader(content))
contentType, bReader, err := inspectResponse(ct, br, len(textPlainDockerfile))
contentType, bReader, err := inspectResponse(ct, br, int64(len(textPlainDockerfile)))
if err == nil {
t.Fatal("Should have returned an error on content-type 'application/json'")
@ -77,7 +77,7 @@ func TestInspectResponseTextSimple(t *testing.T) {
content := []byte(textPlainDockerfile)
ct := "text/plain"
br := ioutil.NopCloser(bytes.NewReader(content))
contentType, bReader, err := inspectResponse(ct, br, len(content))
contentType, bReader, err := inspectResponse(ct, br, int64(len(content)))
if err != nil {
t.Fatal(err)
}
@ -96,7 +96,7 @@ func TestInspectResponseTextSimple(t *testing.T) {
func TestInspectResponseEmptyContentType(t *testing.T) {
content := []byte(textPlainDockerfile)
br := ioutil.NopCloser(bytes.NewReader(content))
contentType, bodyReader, err := inspectResponse("", br, len(content))
contentType, bodyReader, err := inspectResponse("", br, int64(len(content)))
if err != nil {
t.Fatal(err)
}

View file

@ -152,7 +152,7 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
} else {
newC.Command = fmt.Sprintf("%s", container.Path)
}
newC.Created = int(container.Created.Unix())
newC.Created = container.Created.Unix()
newC.Status = container.State.String()
newC.HostConfig.NetworkMode = string(container.hostConfig.NetworkMode)
@ -185,8 +185,8 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
if config.Size {
sizeRw, sizeRootFs := container.GetSize()
newC.SizeRw = int(sizeRw)
newC.SizeRootFs = int(sizeRootFs)
newC.SizeRw = sizeRw
newC.SizeRootFs = sizeRootFs
}
newC.Labels = container.Config.Labels
containers = append(containers, newC)
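
GetSize itself is not part of this diff, but the reason its results fit int64 naturally is that file sizes in the standard library are int64 to begin with. A rough, hypothetical sketch of accumulating an on-disk size without ever passing through int:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func dirSize(root string) (int64, error) {
	var total int64
	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() {
			total += info.Size() // os.FileInfo.Size() returns int64
		}
		return nil
	})
	return total, err
}

func main() {
	size, err := dirSize(".")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("bytes:", size)
}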

View file

@ -190,7 +190,7 @@ func (graph *Graph) Get(name string) (*image.Image, error) {
}
img.Size = size
if err := graph.saveSize(graph.imageRoot(id), int(img.Size)); err != nil {
if err := graph.saveSize(graph.imageRoot(id), img.Size); err != nil {
return nil, err
}
}
@ -490,8 +490,8 @@ func (graph *Graph) loadImage(id string) (*image.Image, error) {
}
// saveSize stores the `size` in the provided graph `img` directory `root`.
func (graph *Graph) saveSize(root string, size int) error {
if err := ioutil.WriteFile(filepath.Join(root, layersizeFileName), []byte(strconv.Itoa(size)), 0600); err != nil {
func (graph *Graph) saveSize(root string, size int64) error {
if err := ioutil.WriteFile(filepath.Join(root, layersizeFileName), []byte(strconv.FormatInt(size, 10)), 0600); err != nil {
return fmt.Errorf("Error storing image size in %s/%s: %s", root, layersizeFileName, err)
}
return nil
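
The strconv change above follows the standard pattern for 64-bit values: strconv.Itoa only accepts int, so an int64 would have to be narrowed first, while strconv.FormatInt takes the int64 directly and strconv.ParseInt reads it back. A minimal sketch:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	var size int64 = 3 << 30 // larger than a 32-bit int can hold

	s := strconv.FormatInt(size, 10) // "3221225472", no truncation risk
	fmt.Println(s)

	back, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(back == size) // true
}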

View file

@ -95,7 +95,7 @@ func (graph *Graph) storeImage(img *image.Image, layerData archive.ArchiveReader
}
}
if err := graph.saveSize(root, int(img.Size)); err != nil {
if err := graph.saveSize(root, img.Size); err != nil {
return err
}

View file

@ -95,7 +95,7 @@ func (graph *Graph) storeImage(img *image.Image, layerData archive.ArchiveReader
}
}
if err := graph.saveSize(root, int(img.Size)); err != nil {
if err := graph.saveSize(root, img.Size); err != nil {
return err
}

View file

@ -45,7 +45,7 @@ func (s *TagStore) Import(src string, repo string, tag string, inConfig io.ReadC
In: resp.Body,
Out: outStream,
Formatter: sf,
Size: int(resp.ContentLength),
Size: resp.ContentLength,
NewLines: true,
ID: "",
Action: "Importing",

View file

@ -99,9 +99,9 @@ func (s *TagStore) Images(filterArgs, filter string, all bool) ([]*types.Image,
newImage := new(types.Image)
newImage.ParentId = image.Parent
newImage.ID = image.ID
newImage.Created = int(image.Created.Unix())
newImage.Size = int(image.Size)
newImage.VirtualSize = int(s.graph.GetParentsSize(image) + image.Size)
newImage.Created = image.Created.Unix()
newImage.Size = image.Size
newImage.VirtualSize = s.graph.GetParentsSize(image) + image.Size
newImage.Labels = image.ContainerConfig.Labels
if utils.DigestReference(ref) {
@ -136,9 +136,9 @@ func (s *TagStore) Images(filterArgs, filter string, all bool) ([]*types.Image,
newImage.RepoTags = []string{"<none>:<none>"}
newImage.RepoDigests = []string{"<none>@<none>"}
newImage.ID = image.ID
newImage.Created = int(image.Created.Unix())
newImage.Size = int(image.Size)
newImage.VirtualSize = int(s.graph.GetParentsSize(image) + image.Size)
newImage.Created = image.Created.Unix()
newImage.Size = image.Size
newImage.VirtualSize = s.graph.GetParentsSize(image) + image.Size
newImage.Labels = image.ContainerConfig.Labels
images = append(images, newImage)
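
Illustrative arithmetic only: keeping both operands int64 means the virtual size (parent layers plus this layer) cannot silently wrap the way int(parents + size) could on a 32-bit build once an image grows past 2 GiB.

package main

import "fmt"

func main() {
	var parentsSize int64 = 1800 << 20 // ~1.8 GiB of parent layers
	var layerSize int64 = 700 << 20    // ~700 MiB for this layer

	virtual := parentsSize + layerSize
	fmt.Println(virtual)        // 2621440000, fine as int64
	fmt.Println(int32(virtual)) // -1673527296 if narrowed to 32 bits
}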

View file

@ -256,7 +256,7 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro
out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
var (
imgJSON []byte
imgSize int
imgSize int64
err error
img *image.Image
)
@ -290,7 +290,7 @@ func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, erro
status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
}
out.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil))
layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, int64(imgSize))
layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, imgSize)
if uerr, ok := err.(*url.Error); ok {
err = uerr.Err
}

View file

@ -166,7 +166,7 @@ func (p *v2Puller) download(di *downloadInfo) {
In: ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
Out: out,
Formatter: p.sf,
Size: int(di.size),
Size: di.size,
NewLines: false,
ID: stringid.TruncateID(di.img.ID),
Action: "Downloading",
@ -264,7 +264,7 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) {
In: d.tmpFile,
Out: out,
Formatter: p.sf,
Size: int(d.size),
Size: d.size,
NewLines: false,
ID: stringid.TruncateID(d.img.ID),
Action: "Extracting",

View file

@ -289,7 +289,7 @@ func (p *v1Pusher) pushImage(imgID, ep string, token []string) (checksum string,
In: layerData,
Out: p.out,
Formatter: p.sf,
Size: int(layerData.Size),
Size: layerData.Size,
NewLines: false,
ID: stringid.TruncateID(imgData.ID),
Action: "Pushing",

View file

@ -239,7 +239,7 @@ func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (d
In: ioutil.NopCloser(tf),
Out: out,
Formatter: p.sf,
Size: int(size),
Size: size,
NewLines: false,
ID: stringid.TruncateID(img.ID),
Action: "Pushing",

View file

@ -23,8 +23,8 @@ func (e *JSONError) Error() string {
type JSONProgress struct {
terminalFd uintptr
Current int `json:"current,omitempty"`
Total int `json:"total,omitempty"`
Current int64 `json:"current,omitempty"`
Total int64 `json:"total,omitempty"`
Start int64 `json:"start,omitempty"`
}
@ -64,7 +64,7 @@ func (p *JSONProgress) String() string {
numbersBox = fmt.Sprintf("%8v/%v", current, total)
if p.Current > 0 && p.Start > 0 && percentage < 50 {
fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0))
fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0))
perEntry := fromStart / time.Duration(p.Current)
left := time.Duration(p.Total-p.Current) * perEntry
left = (left / time.Second) * time.Second
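
A rough sketch of the ETA computation above with the widened fields: time.Unix takes its seconds argument as int64, so an int64 Start needs no conversion, and Total-Current stays exact for multi-gigabyte transfers.

package main

import (
	"fmt"
	"time"
)

func eta(current, total, start int64) time.Duration {
	if current <= 0 {
		return 0
	}
	fromStart := time.Now().UTC().Sub(time.Unix(start, 0))
	perEntry := fromStart / time.Duration(current)
	left := time.Duration(total-current) * perEntry
	return (left / time.Second) * time.Second
}

func main() {
	start := time.Now().Add(-10 * time.Second).Unix()
	fmt.Println(eta(100<<20, 1<<30, start)) // roughly 1m32s for 100 MiB done of 1 GiB
}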

View file

@ -14,9 +14,9 @@ type Config struct {
In io.ReadCloser // Stream to read from
Out io.Writer // Where to send progress bar to
Formatter *streamformatter.StreamFormatter
Size int
Current int
LastUpdate int
Size int64
Current int64
LastUpdate int64
NewLines bool
ID string
Action string
@ -29,11 +29,11 @@ func New(newReader Config) *Config {
func (config *Config) Read(p []byte) (n int, err error) {
read, err := config.In.Read(p)
config.Current += read
updateEvery := 1024 * 512 //512kB
config.Current += int64(read)
updateEvery := int64(1024 * 512) //512kB
if config.Size > 0 {
// Update progress for every 1% read if 1% < 512kB
if increment := int(0.01 * float64(config.Size)); increment < updateEvery {
if increment := int64(0.01 * float64(config.Size)); increment < updateEvery {
updateEvery = increment
}
}
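
Restating the interval logic above outside the reader, in pure int64 terms: progress is reported every 512 KiB, or every 1% of the total when that is smaller, so small downloads still update smoothly.

package main

import "fmt"

func updateInterval(size int64) int64 {
	updateEvery := int64(1024 * 512) // 512 KiB default
	if size > 0 {
		// Update progress for every 1% read if 1% < 512 KiB
		if increment := int64(0.01 * float64(size)); increment < updateEvery {
			updateEvery = increment
		}
	}
	return updateEvery
}

func main() {
	fmt.Println(updateInterval(10 << 20)) // 10 MiB file  -> 104857 (about 1%)
	fmt.Println(updateInterval(4 << 30))  // 4 GiB file   -> 524288 (512 KiB cap)
	fmt.Println(updateInterval(-1))       // unknown size -> 524288
}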

View file

@ -20,7 +20,7 @@ func TestOutputOnPrematureClose(t *testing.T) {
In: reader,
Out: writer,
Formatter: streamformatter.NewStreamFormatter(),
Size: len(content),
Size: int64(len(content)),
NewLines: true,
ID: "Test",
Action: "Read",
@ -60,7 +60,7 @@ func TestCompleteSilently(t *testing.T) {
In: reader,
Out: writer,
Formatter: streamformatter.NewStreamFormatter(),
Size: len(content),
Size: int64(len(content)),
NewLines: true,
ID: "Test",
Action: "Read",

View file

@ -95,17 +95,17 @@ func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]s
func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
return [][2]string{
{"name", h.Name},
{"mode", strconv.Itoa(int(h.Mode))},
{"mode", strconv.FormatInt(h.Mode, 10)},
{"uid", strconv.Itoa(h.Uid)},
{"gid", strconv.Itoa(h.Gid)},
{"size", strconv.Itoa(int(h.Size))},
{"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))},
{"size", strconv.FormatInt(h.Size, 10)},
{"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
{"typeflag", string([]byte{h.Typeflag})},
{"linkname", h.Linkname},
{"uname", h.Uname},
{"gname", h.Gname},
{"devmajor", strconv.Itoa(int(h.Devmajor))},
{"devminor", strconv.Itoa(int(h.Devminor))},
{"devmajor", strconv.FormatInt(h.Devmajor, 10)},
{"devminor", strconv.FormatInt(h.Devminor, 10)},
}
}
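
For reference (standard-library types, not code from this PR): archive/tar declares Header.Mode, Size, Devmajor and Devminor as int64, which is why strconv.FormatInt is the natural fit here and the earlier int(...) casts were only there to satisfy strconv.Itoa.

package main

import (
	"archive/tar"
	"fmt"
	"strconv"
	"time"
)

func main() {
	h := tar.Header{
		Name:    "layer.tar",
		Mode:    0644,
		Size:    3 << 30, // a 3 GiB entry
		ModTime: time.Unix(1438731373, 0),
	}

	fmt.Println(strconv.FormatInt(h.Mode, 10))                 // "420"
	fmt.Println(strconv.FormatInt(h.Size, 10))                 // "3221225472"
	fmt.Println(strconv.FormatInt(h.ModTime.UTC().Unix(), 10)) // "1438731373"
}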

View file

@ -185,7 +185,7 @@ func TestGetRemoteImageJSON(t *testing.T) {
if err != nil {
t.Fatal(err)
}
assertEqual(t, size, 154, "Expected size 154")
assertEqual(t, size, int64(154), "Expected size 154")
if len(json) <= 0 {
t.Fatal("Expected non-empty json")
}

View file

@ -240,7 +240,7 @@ func (r *Session) LookupRemoteImage(imgID, registry string) error {
}
// GetRemoteImageJSON retrieves an image's JSON metadata from the registry.
func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int, error) {
func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) {
res, err := r.client.Get(registry + "images/" + imgID + "/json")
if err != nil {
return nil, -1, fmt.Errorf("Failed to download json: %s", err)
@ -250,9 +250,9 @@ func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int, error
return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res)
}
// if the size header is not present, then set it to '-1'
imageSize := -1
imageSize := int64(-1)
if hdr := res.Header.Get("X-Docker-Size"); hdr != "" {
imageSize, err = strconv.Atoi(hdr)
imageSize, err = strconv.ParseInt(hdr, 10, 64)
if err != nil {
return nil, -1, err
}
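
A minimal sketch of the header-parsing pattern above (the header name is taken from the diff; the helper is hypothetical): strconv.Atoi is capped at the platform int, while strconv.ParseInt(s, 10, 64) always yields a full 64-bit value, with -1 kept as the "size unknown" default.

package main

import (
	"fmt"
	"net/http"
	"strconv"
)

func imageSizeFromHeader(h http.Header) (int64, error) {
	size := int64(-1) // -1 means the size header was absent
	if hdr := h.Get("X-Docker-Size"); hdr != "" {
		parsed, err := strconv.ParseInt(hdr, 10, 64)
		if err != nil {
			return -1, err
		}
		size = parsed
	}
	return size, nil
}

func main() {
	h := http.Header{}
	h.Set("X-Docker-Size", "3221225472") // a 3 GiB image

	size, err := imageSizeFromHeader(h)
	fmt.Println(size, err) // 3221225472 <nil>
}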