moby/moby: Merge pull request #37296 from yusuf-gunaydin/lcow_limits

Implemented memory and CPU limits for LCOW.

The change factors the Windows resource-limit handling out of createSpecWindowsFields and out of the HCS client's createWindows into shared helpers, setResourcesInSpec and extractResourcesFromSpec, and calls them from the LCOW paths (createSpecLinuxFields and createLinux) as well, so a container's memory and CPU limits now take effect for Linux containers on Windows.

Commit: 8e06006717
2 changed files with 80 additions and 64 deletions
@@ -250,45 +250,7 @@ func (daemon *Daemon) createSpecWindowsFields(c *container.Container, s *specs.S
     // First boot optimization
     s.Windows.IgnoreFlushesDuringBoot = !c.HasBeenStartedBefore
 
-    // In s.Windows.Resources
-    cpuShares := uint16(c.HostConfig.CPUShares)
-    cpuMaximum := uint16(c.HostConfig.CPUPercent) * 100
-    cpuCount := uint64(c.HostConfig.CPUCount)
-    if c.HostConfig.NanoCPUs > 0 {
-        if isHyperV {
-            cpuCount = uint64(c.HostConfig.NanoCPUs / 1e9)
-            leftoverNanoCPUs := c.HostConfig.NanoCPUs % 1e9
-            if leftoverNanoCPUs != 0 {
-                cpuCount++
-                cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(cpuCount) / (1e9 / 10000))
-                if cpuMaximum < 1 {
-                    // The requested NanoCPUs is so small that we rounded to 0, use 1 instead
-                    cpuMaximum = 1
-                }
-            }
-        } else {
-            cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(sysinfo.NumCPU()) / (1e9 / 10000))
-            if cpuMaximum < 1 {
-                // The requested NanoCPUs is so small that we rounded to 0, use 1 instead
-                cpuMaximum = 1
-            }
-        }
-    }
-    memoryLimit := uint64(c.HostConfig.Memory)
-    s.Windows.Resources = &specs.WindowsResources{
-        CPU: &specs.WindowsCPUResources{
-            Maximum: &cpuMaximum,
-            Shares:  &cpuShares,
-            Count:   &cpuCount,
-        },
-        Memory: &specs.WindowsMemoryResources{
-            Limit: &memoryLimit,
-        },
-        Storage: &specs.WindowsStorageResources{
-            Bps:  &c.HostConfig.IOMaximumBandwidth,
-            Iops: &c.HostConfig.IOMaximumIOps,
-        },
-    }
+    setResourcesInSpec(c, s, isHyperV)
 
     // Read and add credentials from the security options if a credential spec has been provided.
     if c.HostConfig.SecurityOpt != nil {
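The NanoCPUs handling that moves into setResourcesInSpec is the least obvious part of this hunk. The sketch below is not part of the diff; nanoCPUsToWindowsLimits is a hypothetical, self-contained helper that reproduces the same arithmetic so the unit conversions are easier to follow. A --cpus request is stored as NanoCPUs (1 CPU = 1e9), cpuMaximum is expressed in 1/10000ths of a CPU (the same unit as CPUPercent * 100), and the host CPU count is passed in where the daemon calls sysinfo.NumCPU().

package main

import "fmt"

// nanoCPUsToWindowsLimits is a hypothetical standalone helper (not part of the
// PR) mirroring the arithmetic from setResourcesInSpec: it converts a NanoCPUs
// request into the cpuCount/cpuMaximum pair placed in the OCI spec.
func nanoCPUsToWindowsLimits(nanoCPUs int64, numHostCPUs int, isHyperV bool) (cpuCount uint64, cpuMaximum uint16) {
    if isHyperV {
        // Hyper-V isolation: give the utility VM ceil(nanoCPUs / 1e9) processors,
        // then cap them so the total equals the requested fraction.
        cpuCount = uint64(nanoCPUs / 1e9)
        if nanoCPUs%1e9 != 0 {
            cpuCount++
            cpuMaximum = uint16(nanoCPUs / int64(cpuCount) / (1e9 / 10000))
            if cpuMaximum < 1 {
                cpuMaximum = 1 // the request rounded down to 0; use the smallest expressible cap
            }
        }
    } else {
        // Process isolation: the container sees every host CPU, so the cap is
        // spread across all of them.
        cpuMaximum = uint16(nanoCPUs / int64(numHostCPUs) / (1e9 / 10000))
        if cpuMaximum < 1 {
            cpuMaximum = 1
        }
    }
    return cpuCount, cpuMaximum
}

func main() {
    // --cpus=1.5 (1500000000 NanoCPUs) under Hyper-V: a 2-processor utility VM capped at 7500/10000.
    fmt.Println(nanoCPUsToWindowsLimits(1500000000, 4, true)) // 2 7500

    // The same request with process isolation on a 4-CPU host: a cap of 3750/10000 per host CPU.
    fmt.Println(nanoCPUsToWindowsLimits(1500000000, 4, false)) // 0 3750
}

As the moved code reads, when NanoCPUs is an exact multiple of 1e9 under Hyper-V there is no leftover, so cpuMaximum keeps its CPUPercent-derived value and the limit comes solely from the utility VM's processor count.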
@@ -369,6 +331,9 @@ func (daemon *Daemon) createSpecLinuxFields(c *container.Container, s *specs.Spe
     }
     s.Root.Path = "rootfs"
     s.Root.Readonly = c.HostConfig.ReadonlyRootfs
+
+    setResourcesInSpec(c, s, true) // LCOW is Hyper-V only
+
     capabilities, err := caps.TweakCapabilities(oci.DefaultCapabilities(), c.HostConfig.CapAdd, c.HostConfig.CapDrop, c.HostConfig.Capabilities, c.HostConfig.Privileged)
     if err != nil {
         return fmt.Errorf("linux spec capabilities: %v", err)
@@ -384,6 +349,48 @@ func (daemon *Daemon) createSpecLinuxFields(c *container.Container, s *specs.Spe
     return nil
 }
 
+func setResourcesInSpec(c *container.Container, s *specs.Spec, isHyperV bool) {
+    // In s.Windows.Resources
+    cpuShares := uint16(c.HostConfig.CPUShares)
+    cpuMaximum := uint16(c.HostConfig.CPUPercent) * 100
+    cpuCount := uint64(c.HostConfig.CPUCount)
+    if c.HostConfig.NanoCPUs > 0 {
+        if isHyperV {
+            cpuCount = uint64(c.HostConfig.NanoCPUs / 1e9)
+            leftoverNanoCPUs := c.HostConfig.NanoCPUs % 1e9
+            if leftoverNanoCPUs != 0 {
+                cpuCount++
+                cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(cpuCount) / (1e9 / 10000))
+                if cpuMaximum < 1 {
+                    // The requested NanoCPUs is so small that we rounded to 0, use 1 instead
+                    cpuMaximum = 1
+                }
+            }
+        } else {
+            cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(sysinfo.NumCPU()) / (1e9 / 10000))
+            if cpuMaximum < 1 {
+                // The requested NanoCPUs is so small that we rounded to 0, use 1 instead
+                cpuMaximum = 1
+            }
+        }
+    }
+    memoryLimit := uint64(c.HostConfig.Memory)
+    s.Windows.Resources = &specs.WindowsResources{
+        CPU: &specs.WindowsCPUResources{
+            Maximum: &cpuMaximum,
+            Shares:  &cpuShares,
+            Count:   &cpuCount,
+        },
+        Memory: &specs.WindowsMemoryResources{
+            Limit: &memoryLimit,
+        },
+        Storage: &specs.WindowsStorageResources{
+            Bps:  &c.HostConfig.IOMaximumBandwidth,
+            Iops: &c.HostConfig.IOMaximumIOps,
+        },
+    }
+}
+
 func escapeArgs(args []string) []string {
     escapedArgs := make([]string, len(args))
     for i, a := range args {
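For a concrete picture of what setResourcesInSpec produces, the following is a hedged sketch rather than output taken from the PR: the values are computed by hand from the logic above for a hypothetical Hyper-V container given 1.5 CPUs and a 1 GiB memory limit, using the runtime-spec types the diff references.

package main

import (
    "encoding/json"
    "fmt"

    specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
    // Hypothetical inputs: --cpus=1.5 (1.5e9 NanoCPUs) under Hyper-V isolation
    // and a 1 GiB memory limit; no --cpu-shares given.
    cpuCount := uint64(2)          // ceil(1.5e9 / 1e9)
    cpuMaximum := uint16(7500)     // 1.5e9 / 2 / (1e9 / 10000)
    cpuShares := uint16(0)         // setResourcesInSpec always sets the pointer
    memoryLimit := uint64(1 << 30) // bytes

    s := specs.Spec{Windows: &specs.Windows{}}
    s.Windows.Resources = &specs.WindowsResources{
        CPU: &specs.WindowsCPUResources{
            Maximum: &cpuMaximum,
            Shares:  &cpuShares,
            Count:   &cpuCount,
        },
        Memory: &specs.WindowsMemoryResources{
            Limit: &memoryLimit,
        },
    }

    out, _ := json.MarshalIndent(s.Windows.Resources, "", "  ")
    fmt.Println(string(out))
}

The remaining hunks change the HCS client side (the (c *client) methods), where this OCI spec is translated into an hcsshim container configuration.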
@@ -176,32 +176,9 @@ func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions inter
         HvPartition: false,
     }
 
+    c.extractResourcesFromSpec(spec, configuration)
+
     if spec.Windows.Resources != nil {
-        if spec.Windows.Resources.CPU != nil {
-            if spec.Windows.Resources.CPU.Count != nil {
-                // This check is being done here rather than in adaptContainerSettings
-                // because we don't want to update the HostConfig in case this container
-                // is moved to a host with more CPUs than this one.
-                cpuCount := *spec.Windows.Resources.CPU.Count
-                hostCPUCount := uint64(sysinfo.NumCPU())
-                if cpuCount > hostCPUCount {
-                    c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
-                    cpuCount = hostCPUCount
-                }
-                configuration.ProcessorCount = uint32(cpuCount)
-            }
-            if spec.Windows.Resources.CPU.Shares != nil {
-                configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
-            }
-            if spec.Windows.Resources.CPU.Maximum != nil {
-                configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
-            }
-        }
-        if spec.Windows.Resources.Memory != nil {
-            if spec.Windows.Resources.Memory.Limit != nil {
-                configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
-            }
-        }
         if spec.Windows.Resources.Storage != nil {
             if spec.Windows.Resources.Storage.Bps != nil {
                 configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
@@ -417,6 +394,8 @@ func (c *client) createLinux(id string, spec *specs.Spec, runtimeOptions interfa
         return fmt.Errorf("spec.Windows must not be nil for LCOW containers")
     }
 
+    c.extractResourcesFromSpec(spec, configuration)
+
     // We must have least one layer in the spec
     if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) == 0 {
         return fmt.Errorf("OCI spec is invalid - at least one LayerFolders must be supplied to the runtime")
@@ -598,6 +577,36 @@ func (c *client) createLinux(id string, spec *specs.Spec, runtimeOptions interfa
     return nil
 }
 
+func (c *client) extractResourcesFromSpec(spec *specs.Spec, configuration *hcsshim.ContainerConfig) {
+    if spec.Windows.Resources != nil {
+        if spec.Windows.Resources.CPU != nil {
+            if spec.Windows.Resources.CPU.Count != nil {
+                // This check is being done here rather than in adaptContainerSettings
+                // because we don't want to update the HostConfig in case this container
+                // is moved to a host with more CPUs than this one.
+                cpuCount := *spec.Windows.Resources.CPU.Count
+                hostCPUCount := uint64(sysinfo.NumCPU())
+                if cpuCount > hostCPUCount {
+                    c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
+                    cpuCount = hostCPUCount
+                }
+                configuration.ProcessorCount = uint32(cpuCount)
+            }
+            if spec.Windows.Resources.CPU.Shares != nil {
+                configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
+            }
+            if spec.Windows.Resources.CPU.Maximum != nil {
+                configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
+            }
+        }
+        if spec.Windows.Resources.Memory != nil {
+            if spec.Windows.Resources.Memory.Limit != nil {
+                configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
+            }
+        }
+    }
+}
+
 func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio StdioCallback) (int, error) {
     ctr := c.getContainer(id)
     switch {
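To see the OCI-to-HCS translation done by the new extractResourcesFromSpec in isolation, here is a hedged, self-contained sketch. hcsConfig is a stand-in struct carrying only the hcsshim.ContainerConfig fields the diff touches, extractResources is an illustrative rename, and the host CPU count is passed as a parameter where the daemon calls sysinfo.NumCPU().

package main

import (
    "fmt"

    specs "github.com/opencontainers/runtime-spec/specs-go"
)

// hcsConfig stands in for the hcsshim.ContainerConfig fields used in the diff.
type hcsConfig struct {
    ProcessorCount    uint32
    ProcessorWeight   uint64
    ProcessorMaximum  int64
    MemoryMaximumInMB int64
}

// extractResources mirrors extractResourcesFromSpec: clamp CPU.Count to the
// host CPU count, copy Shares and Maximum, and convert the memory limit from
// bytes to MB.
func extractResources(res *specs.WindowsResources, hostCPUs uint64, cfg *hcsConfig) {
    if res == nil {
        return
    }
    if res.CPU != nil {
        if res.CPU.Count != nil {
            count := *res.CPU.Count
            if count > hostCPUs {
                count = hostCPUs // don't ask HCS for more processors than the host has
            }
            cfg.ProcessorCount = uint32(count)
        }
        if res.CPU.Shares != nil {
            cfg.ProcessorWeight = uint64(*res.CPU.Shares)
        }
        if res.CPU.Maximum != nil {
            cfg.ProcessorMaximum = int64(*res.CPU.Maximum)
        }
    }
    if res.Memory != nil && res.Memory.Limit != nil {
        cfg.MemoryMaximumInMB = int64(*res.Memory.Limit) / 1024 / 1024
    }
}

func main() {
    count, maximum, limit := uint64(8), uint16(7500), uint64(1<<30)
    cfg := &hcsConfig{}
    extractResources(&specs.WindowsResources{
        CPU:    &specs.WindowsCPUResources{Count: &count, Maximum: &maximum},
        Memory: &specs.WindowsMemoryResources{Limit: &limit},
    }, 4, cfg)
    fmt.Printf("%+v\n", cfg) // Count clamped to 4; 1 GiB becomes MemoryMaximumInMB:1024
}

Note that storage bandwidth and IOPS limits are not part of the shared helper; as the createWindows hunk above shows, they remain inline on the Windows-container path, so this change does not apply them to LCOW.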