mirror of
https://github.com/moby/moby.git
synced 2022-11-09 12:21:53 -05:00
Fix layer compression regression
PR #15493 removed compression of layers when pushing them to a V2 registry. This makes layer uploads larger than they should be. This commit restores the compression. It uses an io.Pipe to turn the gzip compressor output Writer into a Reader, so the ReadFrom method can be used on the BlobWriter (which is very important for avoiding many PATCH requests per layer). Fixes #17209 Fixes #17038 Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
This commit is contained in:
parent
c516aa645e
commit
4dce280d96
1 changed file with 23 additions and 5 deletions
|
@ -1,6 +1,7 @@
|
||||||
package graph
|
package graph
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"compress/gzip"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
@ -236,11 +237,8 @@ func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (d
|
||||||
}
|
}
|
||||||
defer layerUpload.Close()
|
defer layerUpload.Close()
|
||||||
|
|
||||||
digester := digest.Canonical.New()
|
|
||||||
tee := io.TeeReader(arch, digester.Hash())
|
|
||||||
|
|
||||||
reader := progressreader.New(progressreader.Config{
|
reader := progressreader.New(progressreader.Config{
|
||||||
In: ioutil.NopCloser(tee), // we'll take care of close here.
|
In: ioutil.NopCloser(arch), // we'll take care of close here.
|
||||||
Out: out,
|
Out: out,
|
||||||
Formatter: p.sf,
|
Formatter: p.sf,
|
||||||
|
|
||||||
|
@ -254,8 +252,28 @@ func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (d
|
||||||
Action: "Pushing",
|
Action: "Pushing",
|
||||||
})
|
})
|
||||||
|
|
||||||
|
digester := digest.Canonical.New()
|
||||||
|
// HACK: The MultiWriter doesn't write directly to layerUpload because
|
||||||
|
// we must make sure the ReadFrom is used, not Write. Using Write would
|
||||||
|
// send a PATCH request for every Write call.
|
||||||
|
pipeReader, pipeWriter := io.Pipe()
|
||||||
|
compressor := gzip.NewWriter(io.MultiWriter(pipeWriter, digester.Hash()))
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
_, err := io.Copy(compressor, reader)
|
||||||
|
if err == nil {
|
||||||
|
err = compressor.Close()
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
pipeWriter.CloseWithError(err)
|
||||||
|
} else {
|
||||||
|
pipeWriter.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushing", nil))
|
out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushing", nil))
|
||||||
nn, err := io.Copy(layerUpload, reader)
|
nn, err := layerUpload.ReadFrom(pipeReader)
|
||||||
|
pipeReader.Close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue