Limit authz response buffer
When the authz response buffer limit is hit, perform a flush. This prevents excessive buffer sizes, especially on large responses (e.g. `/containers/<id>/archive` or `/containers/<id>/export`).

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
parent ce3c2e2331
commit 74f8e47352
2 changed files with 61 additions and 3 deletions
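As a rough sketch of the approach described in the commit message (buffer writes up to a threshold, and flush to the underlying writer before the buffer would grow past it), the following standalone Go example illustrates the idea. The boundedWriter type and its names are hypothetical and are not part of this change:

	package main

	import (
		"fmt"
		"io"
		"os"
	)

	// boundedWriter is a hypothetical sketch of the flush-on-limit idea: it
	// buffers writes, but once the next write would push the buffer past
	// maxBuf it first flushes the buffered bytes to the underlying writer,
	// so the buffer does not keep growing across many small writes.
	type boundedWriter struct {
		dst    io.Writer
		buf    []byte
		maxBuf int
	}

	func (w *boundedWriter) Write(p []byte) (int, error) {
		if len(w.buf)+len(p) > w.maxBuf {
			if err := w.flush(); err != nil {
				return 0, err
			}
		}
		w.buf = append(w.buf, p...)
		return len(p), nil
	}

	// flush forwards whatever is buffered and resets the buffer.
	func (w *boundedWriter) flush() error {
		if len(w.buf) == 0 {
			return nil
		}
		_, err := w.dst.Write(w.buf)
		w.buf = w.buf[:0]
		return err
	}

	func main() {
		w := &boundedWriter{dst: os.Stdout, maxBuf: 8}
		// The second write would exceed the 8-byte limit, so the first
		// chunk is flushed before the second one is buffered.
		fmt.Fprint(w, "hello ")
		fmt.Fprint(w, "world\n")
		w.flush() // drain what is still buffered
	}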
@@ -23,6 +23,7 @@ import (
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration/internal/container"
 	"github.com/docker/docker/internal/test/environment"
+	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/authorization"
 	"github.com/gotestyourself/gotestyourself/assert"
 	"github.com/gotestyourself/gotestyourself/skip"
@@ -382,6 +383,56 @@ func TestAuthZPluginEnsureLoadImportWorking(t *testing.T) {
 	assert.NilError(t, err)
 }
 
+func TestAuthzPluginEnsureContainerCopyToFrom(t *testing.T) {
+	defer setupTestV1(t)()
+	ctrl.reqRes.Allow = true
+	ctrl.resRes.Allow = true
+	d.StartWithBusybox(t, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin)
+
+	dir, err := ioutil.TempDir("", t.Name())
+	assert.Assert(t, err)
+	defer os.RemoveAll(dir)
+
+	f, err := ioutil.TempFile(dir, "send")
+	assert.Assert(t, err)
+	defer f.Close()
+
+	buf := make([]byte, 1024)
+	fileSize := len(buf) * 1024 * 10
+	for written := 0; written < fileSize; {
+		n, err := f.Write(buf)
+		assert.Assert(t, err)
+		written += n
+	}
+
+	ctx := context.Background()
+	client, err := d.NewClient()
+	assert.Assert(t, err)
+
+	cID := container.Run(t, ctx, client)
+	defer client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true})
+
+	_, err = f.Seek(0, io.SeekStart)
+	assert.Assert(t, err)
+
+	srcInfo, err := archive.CopyInfoSourcePath(f.Name(), false)
+	assert.Assert(t, err)
+	srcArchive, err := archive.TarResource(srcInfo)
+	assert.Assert(t, err)
+	defer srcArchive.Close()
+
+	dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, archive.CopyInfo{Path: "/test"})
+	assert.Assert(t, err)
+
+	err = client.CopyToContainer(ctx, cID, dstDir, preparedArchive, types.CopyToContainerOptions{})
+	assert.Assert(t, err)
+
+	rdr, _, err := client.CopyFromContainer(ctx, cID, "/test")
+	assert.Assert(t, err)
+	_, err = io.Copy(ioutil.Discard, rdr)
+	assert.Assert(t, err)
+}
+
 func imageSave(client client.APIClient, path, image string) error {
 	ctx := context.Background()
 	responseReader, err := client.ImageSave(ctx, []string{image})
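For scale: the test writes a 1,024-byte buffer 10,240 times, about 10 MiB in total, which is far larger than the 64 KiB maxBufferSize introduced below. Streaming that archive back out of the container should therefore force the authz response modifier to flush its buffer repeatedly rather than hold the whole response in memory.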
@@ -47,6 +47,8 @@ func NewResponseModifier(rw http.ResponseWriter) ResponseModifier {
 	return &responseModifier{rw: rw, header: make(http.Header)}
 }
 
+const maxBufferSize = 64 * 1024
+
 // responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore
 // the http request/response from docker daemon
 type responseModifier struct {
@@ -116,11 +118,13 @@ func (rm *responseModifier) OverrideHeader(b []byte) error {
 
 // Write stores the byte array inside content
 func (rm *responseModifier) Write(b []byte) (int, error) {
 	if rm.hijacked {
 		return rm.rw.Write(b)
 	}
 
+	if len(rm.body)+len(b) > maxBufferSize {
+		rm.Flush()
+	}
 	rm.body = append(rm.body, b...)
 	return len(b), nil
 }
@@ -192,11 +196,14 @@ func (rm *responseModifier) FlushAll() error {
 	var err error
 	if len(rm.body) > 0 {
 		// Write body
-		_, err = rm.rw.Write(rm.body)
+		var n int
+		n, err = rm.rw.Write(rm.body)
+		// TODO(@cpuguy83): there is now a relatively small buffer limit, instead of discarding our buffer here and
+		// allocating again later this should just keep using the same buffer and track the buffer position (like a bytes.Buffer with a fixed size)
+		rm.body = rm.body[n:]
 	}
 
 	// Clean previous data
 	rm.body = nil
 	rm.statusCode = 0
 	rm.header = http.Header{}
 	return err
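The TODO above suggests a possible follow-up: keep a single fixed-capacity buffer and track a write offset instead of re-allocating the slice after every flush. A hypothetical sketch of that shape (not code from this commit) might look like:

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"os"
	)

	// fixedBuffer illustrates the idea hinted at in the TODO: one reusable
	// buffer of fixed capacity plus a write offset, flushed to the
	// underlying writer whenever it fills up, so no allocation happens per
	// flush. Names and behavior here are illustrative only.
	type fixedBuffer struct {
		dst io.Writer
		buf []byte // allocated once; len(buf) is the fixed capacity
		n   int    // current write offset into buf
	}

	func newFixedBuffer(dst io.Writer, size int) *fixedBuffer {
		return &fixedBuffer{dst: dst, buf: make([]byte, size)}
	}

	func (b *fixedBuffer) Write(p []byte) (int, error) {
		written := 0
		for len(p) > 0 {
			if b.n == len(b.buf) {
				if err := b.Flush(); err != nil {
					return written, err
				}
			}
			c := copy(b.buf[b.n:], p)
			b.n += c
			p = p[c:]
			written += c
		}
		return written, nil
	}

	// Flush writes out the used portion of the buffer and resets the offset.
	func (b *fixedBuffer) Flush() error {
		if b.n == 0 {
			return nil
		}
		_, err := b.dst.Write(b.buf[:b.n])
		b.n = 0
		return err
	}

	func main() {
		var out bytes.Buffer
		fb := newFixedBuffer(&out, 4)
		fmt.Fprint(fb, "hello world") // larger than the 4-byte buffer, so it flushes as it goes
		fb.Flush()
		os.Stdout.WriteString(out.String() + "\n") // prints: hello world
	}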