Add pids-limit support in docker update

- Adds updating PidsLimit in UpdateContainer().
- Adds setting PidsLimit in toContainerdResources().

Signed-off-by: Sunny Gogoi <indiasuny000@gmail.com>
Signed-off-by: Brian Goff <cpuguy83@gmail.com>

commit 74eb258ffb (parent d440fea460)

11 changed files with 98 additions and 10 deletions
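With this change, PidsLimit in the update API is nullable: a nil pointer leaves the current limit untouched, while 0 or -1 removes it. Below is a minimal sketch (not part of this commit) of how a Go API client might exercise the new field; the container ID "cID" and the limit value are placeholders.

package main

import (
	"context"
	"fmt"

	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// 0 or -1 would mean "unlimited"; a nil pointer leaves the limit unchanged.
	limit := int64(64)
	resp, err := cli.ContainerUpdate(ctx, "cID", containertypes.UpdateConfig{
		Resources: containertypes.Resources{PidsLimit: &limit},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Warnings)
}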
@@ -460,9 +460,10 @@ definitions:
         type: "boolean"
         x-nullable: true
       PidsLimit:
-        description: "Tune a container's pids limit. Set -1 for unlimited."
+        description: "Tune a container's pids limit. Set 0 or -1 for unlimited. Leave null to not change"
         type: "integer"
         format: "int64"
+        x-nullable: true
       Ulimits:
         description: |
           A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`"

@@ -3689,6 +3690,10 @@ definitions:
           See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt)
         type: "boolean"
         example: true
+      PidsLimit:
+        description: "Indicates if the host kernel has PID limit support enabled."
+        type: "boolean"
+        example: true
       OomKillDisable:
         description: "Indicates if OOM killer disable is supported on the host."
         type: "boolean"

@@ -4625,7 +4630,7 @@ paths:
               OomKillDisable: false
               OomScoreAdj: 500
               PidMode: ""
-              PidsLimit: -1
+              PidsLimit: 0
               PortBindings:
                 22/tcp:
                   - HostPort: "11022"

@@ -334,7 +334,7 @@ type Resources struct {
 	MemorySwap           int64           // Total memory usage (memory + swap); set `-1` to enable unlimited swap
 	MemorySwappiness     *int64          // Tuning container memory swappiness behaviour
 	OomKillDisable       *bool           // Whether to disable OOM Killer or not
-	PidsLimit            int64           // Setting pids limit for a container
+	PidsLimit            *int64          // Setting pids limit for a container
 	Ulimits              []*units.Ulimit // List of ulimits to be set in the container

 	// Applicable to Windows

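Note that changing PidsLimit from int64 to *int64 is a breaking change for Go consumers of api/types/container: callers that previously assigned a literal now have to pass a pointer, so that nil can mean "leave unchanged". A minimal sketch, assuming a small local int64Ptr helper (hypothetical, analogous to the intPtr closure in the new integration test further down):

package main

import (
	containertypes "github.com/docker/docker/api/types/container"
)

// int64Ptr is a hypothetical local helper for building *int64 values.
func int64Ptr(v int64) *int64 { return &v }

func main() {
	_ = containertypes.Resources{PidsLimit: int64Ptr(100)} // limit the container to 100 pids
	_ = containertypes.Resources{PidsLimit: int64Ptr(0)}   // 0 (or -1) means unlimited
	_ = containertypes.Resources{PidsLimit: nil}           // nil leaves the current limit unchanged
}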
@@ -164,6 +164,7 @@ type Info struct {
 	CPUCfsQuota        bool `json:"CpuCfsQuota"`
 	CPUShares          bool
 	CPUSet             bool
+	PidsLimit          bool
 	IPv4Forwarding     bool
 	BridgeNfIptables   bool
 	BridgeNfIP6tables  bool `json:"BridgeNfIp6tables"`

@@ -342,6 +342,9 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi
 	if resources.CPURealtimeRuntime != 0 {
 		cResources.CPURealtimeRuntime = resources.CPURealtimeRuntime
 	}
+	if resources.PidsLimit != nil {
+		cResources.PidsLimit = resources.PidsLimit
+	}

 	// update HostConfig of container
 	if hostConfig.RestartPolicy.Name != "" {

@@ -159,7 +159,7 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi
 		resources.MemorySwap != 0 ||
 		resources.MemorySwappiness != nil ||
 		resources.OomKillDisable != nil ||
-		resources.PidsLimit != 0 ||
+		(resources.PidsLimit != nil && *resources.PidsLimit != 0) ||
 		len(resources.Ulimits) != 0 ||
 		resources.CPUCount != 0 ||
 		resources.CPUPercent != 0 ||

@@ -118,6 +118,19 @@ func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory {
 	return &memory
 }

+func getPidsLimit(config containertypes.Resources) *specs.LinuxPids {
+	limit := &specs.LinuxPids{}
+	if config.PidsLimit != nil {
+		limit.Limit = *config.PidsLimit
+		if limit.Limit == 0 {
+			// docker API allows 0 to unset this to be consistent with default values.
+			// when updating values, runc requires -1
+			limit.Limit = -1
+		}
+	}
+	return limit
+}
+
 func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) {
 	cpu := specs.LinuxCPU{}

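The helper above normalizes the API value before it reaches runc: a nil pointer leaves the spec's Limit at its zero value, while an explicit 0 is rewritten to -1, which runc expects for "unlimited" when updating (the cgroup's pids.max then reads "max", as the new integration test asserts). A standalone sketch of that mapping, with normalize as a hypothetical stand-in for getPidsLimit:

package main

import "fmt"

// normalize mirrors the translation done by getPidsLimit: nil keeps the zero
// value, an explicit 0 becomes -1 (runc's "unlimited" on update), anything
// else passes through unchanged.
func normalize(v *int64) int64 {
	if v == nil {
		return 0
	}
	if *v == 0 {
		return -1
	}
	return *v
}

func main() {
	zero, limit := int64(0), int64(32)
	fmt.Println(normalize(nil), normalize(&zero), normalize(&limit)) // prints: 0 -1 32
}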
@@ -453,9 +466,10 @@ func verifyPlatformContainerResources(resources *containertypes.Resources, sysIn
 	if resources.OomKillDisable != nil && *resources.OomKillDisable && resources.Memory == 0 {
 		warnings = append(warnings, "OOM killer is disabled for the container, but no memory limit is set, this can result in the system running out of resources.")
 	}
-	if resources.PidsLimit != 0 && !sysInfo.PidsLimit {
+	if resources.PidsLimit != nil && *resources.PidsLimit != 0 && !sysInfo.PidsLimit {
 		warnings = append(warnings, "Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.")
-		resources.PidsLimit = 0
+		var limit int64
+		resources.PidsLimit = &limit
 	}

 	// cpu subsystem checks and adjustments

@@ -181,7 +181,7 @@ func verifyPlatformContainerResources(resources *containertypes.Resources, isHyp
 	if resources.OomKillDisable != nil && *resources.OomKillDisable {
 		return warnings, fmt.Errorf("invalid option: Windows does not support OomKillDisable")
 	}
-	if resources.PidsLimit != 0 {
+	if resources.PidsLimit != nil && *resources.PidsLimit != 0 {
 		return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit")
 	}
 	if len(resources.Ulimits) != 0 {

@@ -27,6 +27,7 @@ func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo)
 	v.CPUCfsQuota = sysInfo.CPUCfsQuota
 	v.CPUShares = sysInfo.CPUShares
 	v.CPUSet = sysInfo.Cpuset
+	v.PidsLimit = sysInfo.PidsLimit
 	v.Runtimes = daemon.configStore.GetAllRuntimes()
 	v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName()
 	v.InitBinary = daemon.configStore.GetInitPath()

@@ -72,9 +72,7 @@ func setResources(s *specs.Spec, r containertypes.Resources) error {
 			ThrottleReadIOPSDevice:  readIOpsDevice,
 			ThrottleWriteIOPSDevice: writeIOpsDevice,
 		},
-		Pids: &specs.LinuxPids{
-			Limit: r.PidsLimit,
-		},
+		Pids: getPidsLimit(r),
 	}

 	if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 {

@@ -50,5 +50,6 @@ func toContainerdResources(resources container.Resources) *libcontainerd.Resourc
 		r.Memory.Swap = &resources.MemorySwap
 	}

+	r.Pids = getPidsLimit(resources)
 	return &r
 }

@@ -7,6 +7,7 @@ import (
 	"testing"
+	"time"

 	"github.com/docker/docker/api/types"
 	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/integration/internal/container"
 	"gotest.tools/assert"

@@ -101,3 +102,67 @@ func TestUpdateCPUQuota(t *testing.T) {
 		assert.Check(t, is.Equal(strconv.FormatInt(test.update, 10), strings.TrimSpace(res.Stdout())))
 	}
 }
+
+func TestUpdatePidsLimit(t *testing.T) {
+	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
+	skip.If(t, !testEnv.DaemonInfo.PidsLimit)
+
+	defer setupTest(t)()
+	client := testEnv.APIClient()
+	ctx := context.Background()
+
+	cID := container.Run(t, ctx, client)
+
+	intPtr := func(i int64) *int64 {
+		return &i
+	}
+
+	for _, test := range []struct {
+		desc     string
+		update   *int64
+		expect   int64
+		expectCg string
+	}{
+		{desc: "update from none", update: intPtr(32), expect: 32, expectCg: "32"},
+		{desc: "no change", update: nil, expectCg: "32"},
+		{desc: "update lower", update: intPtr(16), expect: 16, expectCg: "16"},
+		{desc: "unset limit", update: intPtr(0), expect: 0, expectCg: "max"},
+	} {
+		var before types.ContainerJSON
+		if test.update == nil {
+			var err error
+			before, err = client.ContainerInspect(ctx, cID)
+			assert.NilError(t, err)
+		}
+
+		t.Run(test.desc, func(t *testing.T) {
+			_, err := client.ContainerUpdate(ctx, cID, containertypes.UpdateConfig{
+				Resources: containertypes.Resources{
+					PidsLimit: test.update,
+				},
+			})
+			assert.NilError(t, err)
+
+			inspect, err := client.ContainerInspect(ctx, cID)
+			assert.NilError(t, err)
+			assert.Assert(t, inspect.HostConfig.Resources.PidsLimit != nil)
+
+			if test.update == nil {
+				assert.Assert(t, before.HostConfig.Resources.PidsLimit != nil)
+				assert.Equal(t, *before.HostConfig.Resources.PidsLimit, *inspect.HostConfig.Resources.PidsLimit)
+			} else {
+				assert.Equal(t, *inspect.HostConfig.Resources.PidsLimit, test.expect)
+			}
+
+			ctx, cancel := context.WithTimeout(ctx, 60*time.Second)
+			defer cancel()
+
+			res, err := container.Exec(ctx, client, cID, []string{"cat", "/sys/fs/cgroup/pids/pids.max"})
+			assert.NilError(t, err)
+			assert.Assert(t, is.Len(res.Stderr(), 0))
+
+			out := strings.TrimSpace(res.Stdout())
+			assert.Equal(t, out, test.expectCg)
+		})
+	}
+}