Mirror of https://github.com/moby/moby.git (synced 2022-11-09 12:21:53 -05:00)

Merge pull request #41636 from TBBle/37352-test-and-fix

Set 127GB default sandbox size for WCOW, and ensure storage-opts is honoured on all paths under WCOW and LCOW

Commit f266f13965
7 changed files with 108 additions and 49 deletions

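The commit message above describes a layered default on Windows: WCOW sandboxes get 127GB unless the daemon was started with a graphdriver "size" option, and an explicit per-container storage-opt still wins over both. The following is a minimal, standalone Go sketch of that precedence, not code from this diff; the helper name effectiveSandboxSize and the use of github.com/docker/go-units to parse the human-readable size string are assumptions for illustration.

package main

import (
	"fmt"
	"strings"

	"github.com/docker/go-units"
)

// effectiveSandboxSize resolves the sandbox size in bytes.
// graphOptions are daemon-level graphdriver options such as "size=200GB";
// storageOpt is the per-container HostConfig.StorageOpt map.
func effectiveSandboxSize(graphOptions []string, storageOpt map[string]string) (int64, error) {
	sizeStr := "127GB" // assumed WCOW default introduced by this change

	// A daemon-level "size" graphdriver option overrides the built-in default.
	for _, o := range graphOptions {
		kv := strings.SplitN(o, "=", 2)
		if len(kv) == 2 && strings.ToLower(kv[0]) == "size" {
			sizeStr = kv[1]
		}
	}

	// An explicit per-container storage-opt overrides everything else.
	if v, ok := storageOpt["size"]; ok {
		sizeStr = v
	}

	return units.RAMInBytes(sizeStr)
}

func main() {
	size, err := effectiveSandboxSize([]string{"size=200GB"}, map[string]string{"size": "50GB"})
	if err != nil {
		panic(err)
	}
	fmt.Println(size) // the per-container 50GB wins: 53687091200
}
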
@@ -11,7 +11,6 @@ import (
 	"os"
 	"path"
 	"path/filepath"
-	"runtime"
 	"strings"
 
 	"github.com/docker/docker/api/types"

@@ -448,8 +447,7 @@ func (b *Builder) probeAndCreate(dispatchState *dispatchState, runConfig *contai
 func (b *Builder) create(runConfig *container.Config) (string, error) {
 	logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd)
 
-	isWCOW := runtime.GOOS == "windows" && b.platform != nil && b.platform.OS == "windows"
-	hostConfig := hostConfigFromOptions(b.options, isWCOW)
+	hostConfig := hostConfigFromOptions(b.options)
 	container, err := b.containerManager.Create(runConfig, hostConfig)
 	if err != nil {
 		return "", err

@@ -462,7 +460,7 @@ func (b *Builder) create(runConfig *container.Config) (string, error) {
 	return container.ID, nil
 }
 
-func hostConfigFromOptions(options *types.ImageBuildOptions, isWCOW bool) *container.HostConfig {
+func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConfig {
 	resources := container.Resources{
 		CgroupParent: options.CgroupParent,
 		CPUShares:    options.CPUShares,

@@ -485,16 +483,6 @@ func hostConfigFromOptions(options *types.ImageBuildOptions, isWCOW bool) *conta
 		LogConfig:  defaultLogConfig,
 		ExtraHosts: options.ExtraHosts,
 	}
 
-	// For WCOW, the default of 20GB hard-coded in the platform
-	// is too small for builder scenarios where many users are
-	// using RUN statements to install large amounts of data.
-	// Use 127GB as that's the default size of a VHD in Hyper-V.
-	if isWCOW {
-		hc.StorageOpt = make(map[string]string)
-		hc.StorageOpt["size"] = "127GB"
-	}
-
 	return hc
 }

@@ -187,23 +187,6 @@ func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr
 
 	ctr.HostConfig.StorageOpt = opts.params.HostConfig.StorageOpt
 
-	// Fixes: https://github.com/moby/moby/issues/34074 and
-	// https://github.com/docker/for-win/issues/999.
-	// Merge the daemon's storage options if they aren't already present. We only
-	// do this on Windows as there's no effective sandbox size limit other than
-	// physical on Linux.
-	if isWindows {
-		if ctr.HostConfig.StorageOpt == nil {
-			ctr.HostConfig.StorageOpt = make(map[string]string)
-		}
-		for _, v := range daemon.configStore.GraphOptions {
-			opt := strings.SplitN(v, "=", 2)
-			if _, ok := ctr.HostConfig.StorageOpt[opt[0]]; !ok {
-				ctr.HostConfig.StorageOpt[opt[0]] = opt[1]
-			}
-		}
-	}
-
 	// Set RWLayer for container after mount labels have been set
 	rwLayer, err := daemon.imageService.CreateLayer(ctr, setupInitLayer(daemon.idMapping))
 	if err != nil {

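The block removed above used to merge daemon-level graph options into each container's StorageOpt at create time, without overwriting keys the user set explicitly; this change moves that responsibility into the graph drivers themselves. A standalone sketch of what the removed merge did (hypothetical helper name, not the daemon's code):

package main

import (
	"fmt"
	"strings"
)

// mergeGraphOptions fills in daemon-level "key=value" graph options that the
// container's storage-opt map does not already set; explicit per-container
// values are never overwritten.
func mergeGraphOptions(storageOpt map[string]string, graphOptions []string) map[string]string {
	if storageOpt == nil {
		storageOpt = make(map[string]string)
	}
	for _, v := range graphOptions {
		opt := strings.SplitN(v, "=", 2)
		if len(opt) != 2 {
			continue
		}
		if _, ok := storageOpt[opt[0]]; !ok {
			storageOpt[opt[0]] = opt[1]
		}
	}
	return storageOpt
}

func main() {
	merged := mergeGraphOptions(map[string]string{"size": "50GB"}, []string{"size=200GB"})
	fmt.Println(merged) // map[size:50GB] - the explicit per-container value is kept
}
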
@@ -124,6 +124,7 @@ type Driver struct {
 	cachedScratchMutex sync.Mutex // Protects race conditions from multiple threads creating the cached scratch.
 	options            []string   // Graphdriver options we are initialised with.
 	globalMode         bool       // Indicates if running in an unsafe/global service VM mode.
+	defaultSandboxSize uint64     // The default sandbox size to use if one is not specified
 
 	// NOTE: It is OK to use a cache here because Windows does not support
 	// restoring containers when the daemon dies.

@@ -163,7 +164,8 @@ func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphd
 		serviceVms: &serviceVMMap{
 			svms: make(map[string]*serviceVMMapItem),
 		},
-		globalMode: false,
+		globalMode:         false,
+		defaultSandboxSize: client.DefaultVhdxSizeGB,
 	}
 
 	// Looks for relevant options

@@ -178,6 +180,16 @@ func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphd
 				return nil, fmt.Errorf("%s failed to parse value for 'lcow.globalmode' - must be 'true' or 'false'", title)
 			}
 			break
+		case "lcow.sandboxsize":
+			var err error
+			d.defaultSandboxSize, err = strconv.ParseUint(opt[1], 10, 32)
+			if err != nil {
+				return nil, fmt.Errorf("%s failed to parse value '%s' for 'lcow.sandboxsize'", title, v)
+			}
+			if d.defaultSandboxSize < client.DefaultVhdxSizeGB {
+				return nil, fmt.Errorf("%s 'lcow.sandboxsize' option cannot be less than %d", title, client.DefaultVhdxSizeGB)
+			}
+			break
 		}
 	}
 }

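For the new "lcow.sandboxsize" branch above, a standalone sketch of the same parse-and-validate logic follows. The names defaultVhdxSizeGB and parseSandboxSizeOpt are illustrative, and the 20GB value is an assumption standing in for client.DefaultVhdxSizeGB.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

const defaultVhdxSizeGB = 20 // assumed to mirror client.DefaultVhdxSizeGB

// parseSandboxSizeOpt accepts an option of the form "lcow.sandboxsize=<GB>"
// and returns the configured size, enforcing the same lower bound as the diff.
func parseSandboxSizeOpt(v string) (uint64, error) {
	opt := strings.SplitN(v, "=", 2)
	if len(opt) != 2 || strings.ToLower(opt[0]) != "lcow.sandboxsize" {
		return 0, fmt.Errorf("not an lcow.sandboxsize option: %q", v)
	}
	size, err := strconv.ParseUint(opt[1], 10, 32)
	if err != nil {
		return 0, fmt.Errorf("failed to parse value %q for 'lcow.sandboxsize'", v)
	}
	if size < defaultVhdxSizeGB {
		return 0, fmt.Errorf("'lcow.sandboxsize' option cannot be less than %d", defaultVhdxSizeGB)
	}
	return size, nil
}

func main() {
	size, err := parseSandboxSizeOpt("lcow.sandboxsize=64")
	fmt.Println(size, err) // 64 <nil>
}
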
@@ -517,7 +529,7 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts
 	}
 
 	// Look for an explicit sandbox size option.
-	sandboxSize := uint64(client.DefaultVhdxSizeGB)
+	sandboxSize := d.defaultSandboxSize
 	for k, v := range opts.StorageOpt {
 		switch strings.ToLower(k) {
 		case "lcow.sandboxsize":

@@ -38,8 +38,15 @@ import (
 	"golang.org/x/sys/windows"
 )
 
-// filterDriver is an HCSShim driver type for the Windows Filter driver.
-const filterDriver = 1
+const (
+	// filterDriver is an HCSShim driver type for the Windows Filter driver.
+	filterDriver = 1
+	// For WCOW, the default of 20GB hard-coded in the platform
+	// is too small for builder scenarios where many users are
+	// using RUN or COPY statements to install large amounts of data.
+	// Use 127GB as that's the default size of a VHD in Hyper-V.
+	defaultSandboxSize = "127GB"
+)
 
 var (
 	// mutatedFiles is a list of files that are mutated by the import process

@@ -73,6 +80,10 @@ func (c *checker) IsMounted(path string) bool {
 	return false
 }
 
+type storageOptions struct {
+	size uint64
+}
+
 // Driver represents a windows graph driver.
 type Driver struct {
 	// info stores the shim driver information

@@ -80,8 +91,9 @@ type Driver struct {
 	ctr *graphdriver.RefCounter
 	// it is safe for windows to use a cache here because it does not support
 	// restoring containers when the daemon dies.
-	cacheMu sync.Mutex
-	cache   map[string]string
+	cacheMu            sync.Mutex
+	cache              map[string]string
+	defaultStorageOpts *storageOptions
 }
 
 // InitFilter returns a new Windows storage filter driver.

@@ -100,13 +112,27 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap)
 		return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err)
 	}
 
+	storageOpt := make(map[string]string)
+	storageOpt["size"] = defaultSandboxSize
+
+	for _, v := range options {
+		opt := strings.SplitN(v, "=", 2)
+		storageOpt[strings.ToLower(opt[0])] = opt[1]
+	}
+
+	storageOptions, err := parseStorageOpt(storageOpt)
+	if err != nil {
+		return nil, fmt.Errorf("windowsfilter failed to parse default storage options - %s", err)
+	}
+
 	d := &Driver{
 		info: hcsshim.DriverInfo{
 			HomeDir: home,
 			Flavour: filterDriver,
 		},
-		cache: make(map[string]string),
-		ctr:   graphdriver.NewRefCounter(&checker{}),
+		cache:              make(map[string]string),
+		ctr:                graphdriver.NewRefCounter(&checker{}),
+		defaultStorageOpts: storageOptions,
 	}
 	return d, nil
 }

@@ -231,8 +257,13 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt
 		return fmt.Errorf("Failed to parse storage options - %s", err)
 	}
 
+	sandboxSize := d.defaultStorageOpts.size
 	if storageOptions.size != 0 {
-		if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil {
+		sandboxSize = storageOptions.size
+	}
+
+	if sandboxSize != 0 {
+		if err := hcsshim.ExpandSandboxSize(d.info, id, sandboxSize); err != nil {
 			return err
 		}
 	}

@@ -935,10 +966,6 @@ func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
 	return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil
 }
 
-type storageOptions struct {
-	size uint64
-}
-
 func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
 	options := storageOptions{}
 

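The body of parseStorageOpt is not part of this diff. As a rough guess at its shape, assuming the "size" value is converted with units.RAMInBytes from github.com/docker/go-units and that unknown keys are rejected, it might look like the following sketch:

package main

import (
	"fmt"
	"strings"

	"github.com/docker/go-units"
)

type storageOptions struct {
	size uint64
}

// parseStorageOpt converts a map such as {"size": "127GB"} into driver options,
// mirroring how the windowsfilter driver appears to interpret its defaults.
func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
	options := storageOptions{}
	for key, val := range storageOpt {
		switch strings.ToLower(key) {
		case "size":
			size, err := units.RAMInBytes(val)
			if err != nil {
				return nil, err
			}
			options.size = uint64(size)
		default:
			return nil, fmt.Errorf("unknown storage option: %s", key)
		}
	}
	return &options, nil
}

func main() {
	opts, err := parseStorageOpt(map[string]string{"size": "127GB"})
	fmt.Println(opts, err) // &{136365211648} <nil>
}
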
@@ -363,7 +363,7 @@ Function Run-IntegrationTests() {
         $pinfo.FileName = "gotestsum.exe"
         $pinfo.WorkingDirectory = "$($PWD.Path)"
         $pinfo.UseShellExecute = $false
-        $pinfo.Arguments = "--format=standard-verbose --jsonfile=$jsonFilePath --junitfile=$xmlFilePath -- $env:INTEGRATION_TESTFLAGS"
+        $pinfo.Arguments = "--format=standard-verbose --jsonfile=$jsonFilePath --junitfile=$xmlFilePath -- -test.timeout=60m $env:INTEGRATION_TESTFLAGS"
         $p = New-Object System.Diagnostics.Process
         $p.StartInfo = $pinfo
         $p.Start() | Out-Null

@@ -523,6 +523,55 @@ RUN for g in $(seq 0 8); do dd if=/dev/urandom of=rnd bs=1K count=1 seek=$((1024
 	assert.Check(t, is.Contains(out.String(), "Successfully built"))
 }
 
+func TestBuildWCOWSandboxSize(t *testing.T) {
+	skip.If(t, testEnv.DaemonInfo.OSType != "windows", "only Windows has sandbox size control")
+	ctx := context.TODO()
+	defer setupTest(t)()
+
+	dockerfile := `FROM busybox AS intermediate
+WORKDIR C:\\stuff
+# Create and delete a 21GB file
+RUN fsutil file createnew C:\\stuff\\bigfile_0.txt 22548578304 && del bigfile_0.txt
+# Create three 7GB files
+RUN fsutil file createnew C:\\stuff\\bigfile_1.txt 7516192768
+RUN fsutil file createnew C:\\stuff\\bigfile_2.txt 7516192768
+RUN fsutil file createnew C:\\stuff\\bigfile_3.txt 7516192768
+# Copy that 21GB of data out into a new target
+FROM busybox
+COPY --from=intermediate C:\\stuff C:\\stuff
+`
+
+	buf := bytes.NewBuffer(nil)
+	w := tar.NewWriter(buf)
+	writeTarRecord(t, w, "Dockerfile", dockerfile)
+	err := w.Close()
+	assert.NilError(t, err)
+
+	apiclient := testEnv.APIClient()
+	resp, err := apiclient.ImageBuild(ctx,
+		buf,
+		types.ImageBuildOptions{
+			Remove:      true,
+			ForceRemove: true,
+		})
+
+	out := bytes.NewBuffer(nil)
+	assert.NilError(t, err)
+	_, err = io.Copy(out, resp.Body)
+	resp.Body.Close()
+	assert.NilError(t, err)
+	// The test passes if either:
+	// - the image build succeeded; or
+	// - The "COPY --from=intermediate" step ran out of space during re-exec'd writing of the transport layer information to hcsshim's temp directory
+	// The latter case means we finished the COPY operation, so the sandbox must have been larger than 20GB, which was the test,
+	// and _then_ ran out of space on the host during `importLayer` in the WindowsFilter graph driver, while committing the layer.
+	// See https://github.com/moby/moby/pull/41636#issuecomment-723038517 for more details on the operations being done here.
+	// Specifically, this happens on the Docker Jenkins CI Windows-RS5 build nodes.
+	// The two parts of the acceptable-failure case are on different lines, so we need two regexp checks.
+	assert.Check(t, is.Regexp("Successfully built|COPY --from=intermediate", out.String()))
+	assert.Check(t, is.Regexp("Successfully built|re-exec error: exit status 1: output: write.*daemon\\\\\\\\tmp\\\\\\\\hcs.*bigfile_[1-3].txt: There is not enough space on the disk.", out.String()))
+}
+
 func TestBuildWithEmptyDockerfile(t *testing.T) {
 	skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "broken in earlier versions")
 	ctx := context.TODO()

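writeTarRecord, used in the new test above, is a helper defined elsewhere in this test package; the real one takes a *testing.T and fails the test on error. A minimal sketch of such a helper, adapted here to return an error instead, could be:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
)

// writeTarRecord writes a single file entry (name plus contents) into a tar
// stream, as would be needed to build an in-memory docker build context.
func writeTarRecord(w *tar.Writer, name, content string) error {
	hdr := &tar.Header{
		Name: name,
		Mode: 0600,
		Size: int64(len(content)),
	}
	if err := w.WriteHeader(hdr); err != nil {
		return err
	}
	_, err := w.Write([]byte(content))
	return err
}

func main() {
	buf := bytes.NewBuffer(nil)
	tw := tar.NewWriter(buf)
	if err := writeTarRecord(tw, "Dockerfile", "FROM busybox\n"); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len(), "bytes of build context")
}
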
@@ -285,7 +285,7 @@ func TestContainerVolumesMountedAsShared(t *testing.T) {
 
 	// Convert this directory into a shared mount point so that we do
 	// not rely on propagation properties of parent mount.
-	if err := mount.Mount(tmpDir1.Path(), tmpDir1.Path(), "none", "bind,private"); err != nil {
+	if err := mount.MakePrivate(tmpDir1.Path()); err != nil {
 		t.Fatal(err)
 	}
 	defer func() {

@@ -293,7 +293,7 @@ func TestContainerVolumesMountedAsShared(t *testing.T) {
 			t.Fatal(err)
 		}
 	}()
-	if err := mount.Mount("none", tmpDir1.Path(), "none", "shared"); err != nil {
+	if err := mount.MakeShared(tmpDir1.Path()); err != nil {
 		t.Fatal(err)
 	}
 

@@ -342,7 +342,7 @@ func TestContainerVolumesMountedAsSlave(t *testing.T) {
 
 	// Convert this directory into a shared mount point so that we do
 	// not rely on propagation properties of parent mount.
-	if err := mount.Mount(tmpDir1.Path(), tmpDir1.Path(), "none", "bind,private"); err != nil {
+	if err := mount.MakePrivate(tmpDir1.Path()); err != nil {
 		t.Fatal(err)
 	}
 	defer func() {

@@ -350,7 +350,7 @@ func TestContainerVolumesMountedAsSlave(t *testing.T) {
 			t.Fatal(err)
 		}
 	}()
-	if err := mount.Mount("none", tmpDir1.Path(), "none", "shared"); err != nil {
+	if err := mount.MakeShared(tmpDir1.Path()); err != nil {
 		t.Fatal(err)
 	}
 

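mount.MakePrivate and mount.MakeShared, used in the test hunks above, change the propagation mode of an existing mount point; the real helpers may also bind-mount the path onto itself first if it is not already a mount point. A rough Linux-only sketch of the core operation, using golang.org/x/sys/unix rather than the test's mount package:

//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// makePropagation applies a propagation flag (unix.MS_PRIVATE, unix.MS_SHARED,
// unix.MS_SLAVE, ...) to an existing mount point.
func makePropagation(mountPoint string, flag uintptr) error {
	return unix.Mount("", mountPoint, "none", flag, "")
}

func main() {
	// Equivalent in spirit to mount.MakeShared(tmpDir1.Path()) in the test;
	// requires root and an existing mount point to succeed.
	if err := makePropagation("/mnt/example", unix.MS_SHARED); err != nil {
		fmt.Println("make shared failed:", err)
	}
}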