Add a buffered Writer between layer compression and layer upload
Without this buffering, the compressor was outputting 64 bytes at a
time to the HTTP stream, which was resulting in absurdly small chunk
sizes and a lot of extra overhead. The buffering restores the chunk
size to 32768 bytes, which matches the behavior with 1.8.2.

Times pushing to a local registry:

1.8.2:               0m18.934s
master:              0m20.564s
master+this commit:  0m17.593s

Fixes: #17038

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
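As a rough, standalone illustration of the effect described in the commit message (this sketch is not part of the commit): the program below counts how many Write calls reach a sink with and without an intermediate bufio.Writer. The countingWriter helper and the payload are invented for the demo; only the 32768 buffer size comes from the patch.

package main

import (
	"bufio"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

// countingWriter is a hypothetical helper for this sketch: it records
// how many Write calls it receives, i.e. how the upstream writer
// chunks its output.
type countingWriter struct {
	writes int
	bytes  int
}

func (w *countingWriter) Write(p []byte) (int, error) {
	w.writes++
	w.bytes += len(p)
	return len(p), nil
}

func main() {
	payload := strings.Repeat("layer data ", 1<<16)

	// Unbuffered: every small chunk the compressor emits becomes a
	// separate Write on the sink (and, in the real code, a separate
	// HTTP chunk).
	direct := &countingWriter{}
	zw := gzip.NewWriter(direct)
	io.Copy(zw, strings.NewReader(payload))
	zw.Close()
	fmt.Printf("unbuffered: %d writes, %d bytes\n", direct.writes, direct.bytes)

	// Buffered: a 32 KiB bufio.Writer coalesces the compressor's
	// output before it reaches the sink.
	buffered := &countingWriter{}
	bw := bufio.NewWriterSize(buffered, 32768)
	zw = gzip.NewWriter(bw)
	io.Copy(zw, strings.NewReader(payload))
	zw.Close() // flush the compressor into bw first...
	bw.Flush() // ...then flush the buffer to the sink
	fmt.Printf("buffered:   %d writes, %d bytes\n", buffered.writes, buffered.bytes)
}

The exact write counts depend on the Go version's compress/flate internals; the point is the ratio between the two runs, not the absolute numbers.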
Parent: 21aecf62a0
Commit: bb69f10df9

1 changed file with 9 additions and 1 deletion
@@ -1,6 +1,7 @@
 package graph
 
 import (
+	"bufio"
 	"compress/gzip"
 	"fmt"
 	"io"
@@ -20,6 +21,8 @@ import (
 	"golang.org/x/net/context"
 )
 
+const compressionBufSize = 32768
+
 type v2Pusher struct {
 	*TagStore
 	endpoint registry.APIEndpoint
@@ -259,13 +262,18 @@ func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (d
 	// we must make sure the ReadFrom is used, not Write. Using Write would
 	// send a PATCH request for every Write call.
 	pipeReader, pipeWriter := io.Pipe()
-	compressor := gzip.NewWriter(io.MultiWriter(pipeWriter, digester.Hash()))
+	// Use a bufio.Writer to avoid excessive chunking in HTTP request.
+	bufWriter := bufio.NewWriterSize(io.MultiWriter(pipeWriter, digester.Hash()), compressionBufSize)
+	compressor := gzip.NewWriter(bufWriter)
 
 	go func() {
 		_, err := io.Copy(compressor, reader)
 		if err == nil {
 			err = compressor.Close()
 		}
+		if err == nil {
+			err = bufWriter.Flush()
+		}
 		if err != nil {
 			pipeWriter.CloseWithError(err)
 		} else {
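For context, here is a self-contained sketch of the pattern the patch establishes: gzip output fanned through a bufio.Writer into an io.Pipe and a hash. It substitutes crypto/sha256 for the registry digester and io.Discard for the blob upload; both are assumptions made for the sake of a runnable example.

package main

import (
	"bufio"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

const compressionBufSize = 32768

func main() {
	layer := strings.NewReader(strings.Repeat("layer bytes ", 1<<12))

	pipeReader, pipeWriter := io.Pipe()
	hash := sha256.New()

	// The buffer sits between the compressor and the pipe/hash
	// fan-out, so whatever reads from pipeReader sees 32 KiB chunks
	// instead of the compressor's small internal writes.
	bufWriter := bufio.NewWriterSize(io.MultiWriter(pipeWriter, hash), compressionBufSize)
	compressor := gzip.NewWriter(bufWriter)

	go func() {
		_, err := io.Copy(compressor, layer)
		if err == nil {
			err = compressor.Close() // emit gzip's final block into bufWriter
		}
		if err == nil {
			err = bufWriter.Flush() // push the buffered tail into the pipe
		}
		if err != nil {
			pipeWriter.CloseWithError(err)
		} else {
			pipeWriter.Close()
		}
	}()

	// Stand-in for the blob upload: drain the pipe and report the result.
	n, err := io.Copy(io.Discard, pipeReader)
	if err != nil {
		panic(err)
	}
	fmt.Printf("uploaded %d compressed bytes, sha256 %x\n", n, hash.Sum(nil))
}

The flush order in the goroutine matters: compressor.Close() writes gzip's final block into bufWriter, and only then can bufWriter.Flush() push the buffered tail into the pipe. Reversing the two calls would strand up to 32 KiB of compressed data in the buffer.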