From 74f8e47352e71aad4015d8d9dea8f16e7a055863 Mon Sep 17 00:00:00 2001
From: Brian Goff
Date: Wed, 11 Apr 2018 15:19:15 -0400
Subject: [PATCH] Limit authz response buffer

When the authz response buffer limit is hit, perform a flush. This
prevents excessive buffer sizes, especially on large responses
(e.g. `/containers//archive` or `/containers//export`).

Signed-off-by: Brian Goff
---
 integration/plugin/authz/authz_plugin_test.go | 51 +++++++++++++++++++
 pkg/authorization/response.go                 | 13 +++--
 2 files changed, 61 insertions(+), 3 deletions(-)

diff --git a/integration/plugin/authz/authz_plugin_test.go b/integration/plugin/authz/authz_plugin_test.go
index 5bca6c138d..5585d93024 100644
--- a/integration/plugin/authz/authz_plugin_test.go
+++ b/integration/plugin/authz/authz_plugin_test.go
@@ -23,6 +23,7 @@ import (
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration/internal/container"
 	"github.com/docker/docker/internal/test/environment"
+	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/authorization"
 	"github.com/gotestyourself/gotestyourself/assert"
 	"github.com/gotestyourself/gotestyourself/skip"
@@ -382,6 +383,56 @@ func TestAuthZPluginEnsureLoadImportWorking(t *testing.T) {
 	assert.NilError(t, err)
 }
 
+func TestAuthzPluginEnsureContainerCopyToFrom(t *testing.T) {
+	defer setupTestV1(t)()
+	ctrl.reqRes.Allow = true
+	ctrl.resRes.Allow = true
+	d.StartWithBusybox(t, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin)
+
+	dir, err := ioutil.TempDir("", t.Name())
+	assert.Assert(t, err)
+	defer os.RemoveAll(dir)
+
+	f, err := ioutil.TempFile(dir, "send")
+	assert.Assert(t, err)
+	defer f.Close()
+
+	buf := make([]byte, 1024)
+	fileSize := len(buf) * 1024 * 10
+	for written := 0; written < fileSize; {
+		n, err := f.Write(buf)
+		assert.Assert(t, err)
+		written += n
+	}
+
+	ctx := context.Background()
+	client, err := d.NewClient()
+	assert.Assert(t, err)
+
+	cID := container.Run(t, ctx, client)
+	defer client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true})
+
+	_, err = f.Seek(0, io.SeekStart)
+	assert.Assert(t, err)
+
+	srcInfo, err := archive.CopyInfoSourcePath(f.Name(), false)
+	assert.Assert(t, err)
+	srcArchive, err := archive.TarResource(srcInfo)
+	assert.Assert(t, err)
+	defer srcArchive.Close()
+
+	dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, archive.CopyInfo{Path: "/test"})
+	assert.Assert(t, err)
+
+	err = client.CopyToContainer(ctx, cID, dstDir, preparedArchive, types.CopyToContainerOptions{})
+	assert.Assert(t, err)
+
+	rdr, _, err := client.CopyFromContainer(ctx, cID, "/test")
+	assert.Assert(t, err)
+	_, err = io.Copy(ioutil.Discard, rdr)
+	assert.Assert(t, err)
+}
+
 func imageSave(client client.APIClient, path, image string) error {
 	ctx := context.Background()
 	responseReader, err := client.ImageSave(ctx, []string{image})
diff --git a/pkg/authorization/response.go b/pkg/authorization/response.go
index 647ff9ff6e..6b674bc295 100644
--- a/pkg/authorization/response.go
+++ b/pkg/authorization/response.go
@@ -47,6 +47,8 @@ func NewResponseModifier(rw http.ResponseWriter) ResponseModifier {
 	return &responseModifier{rw: rw, header: make(http.Header)}
 }
 
+const maxBufferSize = 64 * 1024
+
 // responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore
 // the http request/response from docker daemon
 type responseModifier struct {
@@ -116,11 +118,13 @@ func (rm *responseModifier) OverrideHeader(b []byte) error {
 
 // Write stores the byte array inside content
 func (rm *responseModifier) Write(b []byte) (int, error) {
-
 	if rm.hijacked {
 		return rm.rw.Write(b)
 	}
 
+	if len(rm.body)+len(b) > maxBufferSize {
+		rm.Flush()
+	}
 	rm.body = append(rm.body, b...)
 	return len(b), nil
 }
@@ -192,11 +196,14 @@ func (rm *responseModifier) FlushAll() error {
 	var err error
 	if len(rm.body) > 0 {
 		// Write body
-		_, err = rm.rw.Write(rm.body)
+		var n int
+		n, err = rm.rw.Write(rm.body)
+		// TODO(@cpuguy83): there is now a relatively small buffer limit, instead of discarding our buffer here and
+		// allocating again later this should just keep using the same buffer and track the buffer position (like a bytes.Buffer with a fixed size)
+		rm.body = rm.body[n:]
 	}
 
 	// Clean previous data
-	rm.body = nil
 	rm.statusCode = 0
 	rm.header = http.Header{}
 	return err
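
Reviewer note (illustration only, not part of the patch): the sketch below is a
self-contained Go program showing the flush-on-limit behavior the Write and
FlushAll hunks introduce. The boundedWriter type, its flush method, and the
main driver are invented for this example; only the 64KB limit and the
"write, then keep the unwritten tail" logic mirror the hunks above.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// Same limit the patch adds in pkg/authorization/response.go.
const maxBufferSize = 64 * 1024

// boundedWriter is an illustrative stand-in for responseModifier: it buffers
// writes, but flushes to the underlying writer once the buffer would exceed
// maxBufferSize, instead of growing without bound.
type boundedWriter struct {
	dst io.Writer
	buf []byte
}

func (w *boundedWriter) Write(b []byte) (int, error) {
	// Flush before the buffer would grow past the limit, as the patched
	// responseModifier.Write does.
	if len(w.buf)+len(b) > maxBufferSize {
		if err := w.flush(); err != nil {
			return 0, err
		}
	}
	w.buf = append(w.buf, b...)
	return len(b), nil
}

func (w *boundedWriter) flush() error {
	if len(w.buf) == 0 {
		return nil
	}
	n, err := w.dst.Write(w.buf)
	// Keep any unwritten tail, mirroring rm.body = rm.body[n:] in FlushAll.
	w.buf = w.buf[n:]
	return err
}

func main() {
	var out bytes.Buffer
	w := &boundedWriter{dst: &out}

	// Simulate a large response (10 MiB) written in 1 KiB chunks, the same
	// shape as the integration test's copy-to/copy-from payload.
	chunk := make([]byte, 1024)
	for i := 0; i < 10*1024; i++ {
		if _, err := w.Write(chunk); err != nil {
			panic(err)
		}
	}
	if err := w.flush(); err != nil {
		panic(err)
	}
	fmt.Printf("forwarded %d bytes total; buffered at most %d at a time\n", out.Len(), maxBufferSize)
}

With 1 KiB writes the buffered data never exceeds maxBufferSize before being
flushed to the underlying writer, which is the effect the patch is after on
large responses such as container export or archive copies.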