Windows: require Windows Server RS5 / ltsc2019 (build 17763) as minimum

Windows Server 2016 (RS1) has reached end of support, and Docker Desktop
requires Windows 10 19H2 (version 1909, build 18363) as a minimum.

This patch makes Windows Server RS5 / ltsc2019 (build 17763) the minimum version
required to run the daemon, and removes various hacks that were only needed on
older versions of Windows.
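
With that baseline, the version gate in checkSystem() reduces to a single
comparison against osversion.RS5. A minimal, self-contained sketch of that
gate, assuming the github.com/Microsoft/hcsshim/osversion package (RS5
corresponds to build 17763):

    package main

    import (
        "fmt"

        "github.com/Microsoft/hcsshim/osversion"
    )

    // checkSystem mirrors the consolidated gate from the hunk further down: the
    // separate "major version < 10" and "build < RS1 (14393)" checks collapse
    // into a single RS5 comparison. Note the binary must be manifested for
    // osversion to report the real build number (unmanifested binaries see 9200).
    func checkSystem() error {
        if osversion.Get().MajorVersion < 10 || osversion.Build() < osversion.RS5 {
            return fmt.Errorf("this version of Windows does not support the docker daemon (Windows build %d or higher is required)", osversion.RS5)
        }
        return nil
    }

    func main() {
        if err := checkSystem(); err != nil {
            fmt.Println(err)
        }
    }

Once RS5 is the baseline, the older per-feature gates removed below (the ConDrv
service dependency on RS1, NanoCPU rounding for Hyper-V before RS3, named-pipe
mounts on RS3, device assignment on pre-RS5 builds, process isolation on
pre-RS5 client SKUs, and OutboundNAT on builds > 16236) no longer need to be
checked at run time.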

One check for Windows RS3 remains, guarding a workaround for older versions.
Recent changes in Windows appear to have regressed on the same issue, so that
code is kept for now, in case the workaround turns out to be needed (again):

085c6a98d5/daemon/graphdriver/windows/windows.go (L319-L341)
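
To make the shape of that remaining gate concrete, a purely illustrative sketch
follows; the helper name applyLegacyWorkaround and the exact build comparison
are assumptions made here, and the real code is only at the link above:

    package main

    import "github.com/Microsoft/hcsshim/osversion"

    // applyLegacyWorkaround is a made-up placeholder for the retained
    // graphdriver workaround; the real logic is not reproduced here.
    func applyLegacyWorkaround() {}

    // maybeApplyWorkaround sketches the general shape of a build-gated
    // workaround like the one that remains: it sits behind an explicit
    // osversion check (RS3 == build 16299 is used here only as an
    // illustrative threshold) so it can be re-evaluated or widened if the
    // regression resurfaces on newer builds.
    func maybeApplyWorkaround() {
        if osversion.Build() < osversion.RS3 {
            applyLegacyWorkaround()
        }
    }

    func main() { maybeApplyWorkaround() }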

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>

Author: Sebastiaan van Stijn <github@gone.nl>
Date:   2022-02-17 17:08:46 +01:00
Parent: 54d35c071d
Commit: 1b3fef5333

12 changed files with 130 additions and 254 deletions

View File

@@ -12,7 +12,6 @@ import (
"time"
"unsafe"
"github.com/Microsoft/hcsshim/osversion"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"golang.org/x/sys/windows"
@@ -167,19 +166,11 @@ func registerService() error {
}
defer m.Disconnect()
depends := []string{}
// This dependency is required on build 14393 (RS1)
// it is added to the platform in newer builds
if osversion.Build() == osversion.RS1 {
depends = append(depends, "ConDrv")
}
c := mgr.Config{
ServiceType: windows.SERVICE_WIN32_OWN_PROCESS,
StartType: mgr.StartAutomatic,
ErrorControl: mgr.ErrorNormal,
Dependencies: depends,
Dependencies: []string{},
DisplayName: "Docker Engine",
}

View File

@@ -118,15 +118,6 @@ func verifyPlatformContainerResources(resources *containertypes.Resources, isHyp
return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU())
}
if resources.NanoCPUs > 0 && isHyperv && osversion.Build() < osversion.RS3 {
leftoverNanoCPUs := resources.NanoCPUs % 1e9
if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 {
resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9
warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. NanoCPUs rounded to %d", resources.NanoCPUs)
warnings = append(warnings, warningString)
}
}
if len(resources.BlkioDeviceReadBps) > 0 {
return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps")
}
@@ -187,19 +178,7 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.
if hostConfig == nil {
return nil, nil
}
hyperv := daemon.runAsHyperVContainer(hostConfig)
// On RS5, we allow (but don't strictly support) process isolation on Client SKUs.
// Prior to RS5, we don't allow process isolation on Client SKUs.
// @engine maintainers. This block should not be removed. It partially enforces licensing
// restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this.
if !hyperv && system.IsWindowsClient() && osversion.Build() < osversion.RS5 {
return warnings, fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers")
}
w, err := verifyPlatformContainerResources(&hostConfig.Resources, hyperv)
warnings = append(warnings, w...)
return warnings, err
return verifyPlatformContainerResources(&hostConfig.Resources, daemon.runAsHyperVContainer(hostConfig))
}
// verifyDaemonSettings performs validation of daemon config struct
@@ -211,11 +190,8 @@ func verifyDaemonSettings(config *config.Config) error {
func checkSystem() error {
// Validate the OS version. Note that dockerd.exe must be manifested for this
// call to return the correct version.
if osversion.Get().MajorVersion < 10 {
return fmt.Errorf("This version of Windows does not support the docker daemon")
}
if osversion.Build() < osversion.RS1 {
return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10")
if osversion.Get().MajorVersion < 10 || osversion.Build() < osversion.RS5 {
return fmt.Errorf("this version of Windows does not support the docker daemon (Windows build %d or higher is required)", osversion.RS5)
}
vmcompute := windows.NewLazySystemDLL("vmcompute.dll")
@@ -598,12 +574,6 @@ func (daemon *Daemon) setDefaultIsolation() error {
daemon.defaultIsolation = containertypes.Isolation("hyperv")
}
if containertypes.Isolation(val).IsProcess() {
if system.IsWindowsClient() && osversion.Build() < osversion.RS5 {
// On RS5, we allow (but don't strictly support) process isolation on Client SKUs.
// @engine maintainers. This block should not be removed. It partially enforces licensing
// restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this.
return fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers")
}
daemon.defaultIsolation = containertypes.Isolation("process")
}
default:

View File

@@ -7,7 +7,6 @@ import (
"path/filepath"
"strings"
"github.com/Microsoft/hcsshim/osversion"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/errdefs"
@@ -260,9 +259,6 @@ func (daemon *Daemon) createSpecWindowsFields(c *container.Container, s *specs.S
if isHyperV {
return errors.New("device assignment is not supported for HyperV containers")
}
if osversion.Build() < osversion.RS5 {
return errors.New("device assignment requires Windows builds RS5 (17763+) or later")
}
for _, deviceMapping := range c.HostConfig.Devices {
srcParts := strings.SplitN(deviceMapping.PathOnHost, "/", 2)
if len(srcParts) != 2 {

View File

@@ -12,7 +12,6 @@ import (
"testing"
winio "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/osversion"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
@@ -22,8 +21,6 @@ import (
)
func (s *DockerSuite) TestContainersAPICreateMountsBindNamedPipe(c *testing.T) {
testRequires(c, testEnv.IsLocalDaemon, DaemonIsWindowsAtLeastBuild(osversion.RS3)) // Named pipe support was added in RS3
// Create a host pipe to map into the container
hostPipeName := fmt.Sprintf(`\\.\pipe\docker-cli-test-pipe-%x`, rand.Uint64())
pc := &winio.PipeConfig{

View File

@@ -4,18 +4,14 @@ import (
"context"
"net/http"
"net/http/httptest"
"runtime"
"strconv"
"strings"
"testing"
"github.com/Microsoft/hcsshim/osversion"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/docker/docker/integration-cli/cli"
"github.com/docker/docker/integration-cli/cli/build"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/testutil/request"
"gotest.tools/v3/assert"
)
@@ -59,17 +55,6 @@ func (s *DockerSuite) TestAPIImagesFilter(c *testing.T) {
}
func (s *DockerSuite) TestAPIImagesSaveAndLoad(c *testing.T) {
if runtime.GOOS == "windows" {
// Note we parse kernel.GetKernelVersion rather than osversion.Build()
// as test binaries aren't manifested, so would otherwise report build 9200.
v, err := kernel.GetKernelVersion()
assert.NilError(c, err)
buildNumber, _ := strconv.Atoi(strings.Split(strings.SplitN(v.String(), " ", 3)[2][1:], ".")[0])
if buildNumber <= osversion.RS3 {
c.Skip("Temporarily disabled on RS3 and older because they are too slow. See #39909")
}
}
testRequires(c, Network)
buildImageSuccessfully(c, "saveandload", build.WithDockerfile("FROM busybox\nENV FOO bar"))
id := getIDByName(c, "saveandload")
@@ -141,17 +126,6 @@ func (s *DockerSuite) TestAPIImagesHistory(c *testing.T) {
}
func (s *DockerSuite) TestAPIImagesImportBadSrc(c *testing.T) {
if runtime.GOOS == "windows" {
// Note we parse kernel.GetKernelVersion rather than osversion.Build()
// as test binaries aren't manifested, so would otherwise report build 9200.
v, err := kernel.GetKernelVersion()
assert.NilError(c, err)
buildNumber, _ := strconv.Atoi(strings.Split(strings.SplitN(v.String(), " ", 3)[2][1:], ".")[0])
if buildNumber == osversion.RS3 {
c.Skip("Temporarily disabled on RS3 builds")
}
}
testRequires(c, Network, testEnv.IsLocalDaemon)
server := httptest.NewServer(http.NewServeMux())

View File

@@ -22,7 +22,6 @@ import (
"testing"
"time"
"github.com/Microsoft/hcsshim/osversion"
"github.com/docker/docker/client"
"github.com/docker/docker/integration-cli/cli"
"github.com/docker/docker/integration-cli/cli/build"
@@ -1877,11 +1876,6 @@ func (s *DockerSuite) TestRunBindMounts(c *testing.T) {
testRequires(c, DaemonIsLinux, NotUserNamespace)
}
if testEnv.OSType == "windows" {
// Disabled prior to RS5 due to how volumes are mapped
testRequires(c, DaemonIsWindowsAtLeastBuild(osversion.RS5))
}
prefix, _ := getPrefixAndSlashFromDaemonPlatform()
tmpDir, err := os.MkdirTemp("", "docker-test-container")
@@ -4413,7 +4407,7 @@ func (s *DockerSuite) TestRunAddDeviceCgroupRule(c *testing.T) {
// Verifies that running as local system is operating correctly on Windows
func (s *DockerSuite) TestWindowsRunAsSystem(c *testing.T) {
testRequires(c, DaemonIsWindowsAtLeastBuild(osversion.RS3))
testRequires(c, DaemonIsWindows)
out, _ := dockerCmd(c, "run", "--net=none", `--user=nt authority\system`, "--hostname=XYZZY", minimalBaseImage(), "cmd", "/c", `@echo %USERNAME%`)
assert.Equal(c, strings.TrimSpace(out), "XYZZY$")
}

View File

@@ -2,15 +2,11 @@ package main
import (
"fmt"
"runtime"
"strconv"
"strings"
"testing"
"time"
"github.com/Microsoft/hcsshim/osversion"
"github.com/docker/docker/integration-cli/cli"
"github.com/docker/docker/pkg/parsers/kernel"
"gotest.tools/v3/assert"
"gotest.tools/v3/icmd"
)
@@ -190,18 +186,6 @@ func (s *DockerSuite) TestStartAttachWithRename(c *testing.T) {
}
func (s *DockerSuite) TestStartReturnCorrectExitCode(c *testing.T) {
// Note we parse kernel.GetKernelVersion rather than system.GetOSVersion
// as test binaries aren't manifested, so would otherwise report the wrong
// build number.
if runtime.GOOS == "windows" {
v, err := kernel.GetKernelVersion()
assert.NilError(c, err)
build, _ := strconv.Atoi(strings.Split(strings.SplitN(v.String(), " ", 3)[2][1:], ".")[0])
if build < osversion.RS3 {
c.Skip("FLAKY on Windows RS1, see #38521")
}
}
dockerCmd(c, "create", "--restart=on-failure:2", "--name", "withRestart", "busybox", "sh", "-c", "exit 11")
dockerCmd(c, "create", "--rm", "--name", "withRm", "busybox", "sh", "-c", "exit 12")

View File

@@ -6,7 +6,6 @@ import (
"net/http"
"os"
"os/exec"
"strconv"
"strings"
"testing"
"time"
@@ -27,17 +26,6 @@ func DaemonIsWindows() bool {
return testEnv.OSType == "windows"
}
func DaemonIsWindowsAtLeastBuild(buildNumber int) func() bool {
return func() bool {
if testEnv.OSType != "windows" {
return false
}
version := testEnv.DaemonInfo.KernelVersion
numVersion, _ := strconv.Atoi(strings.Split(version, " ")[1])
return numVersion >= buildNumber
}
}
func DaemonIsLinux() bool {
return testEnv.OSType == "linux"
}

View File

@@ -16,7 +16,6 @@ import (
"time"
"github.com/Microsoft/hcsshim"
"github.com/Microsoft/hcsshim/osversion"
"github.com/containerd/containerd"
"github.com/containerd/containerd/cio"
containerderrdefs "github.com/containerd/containerd/errdefs"
@@ -305,9 +304,6 @@ func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions inter
}
}
configuration.MappedDirectories = mds
if len(mps) > 0 && osversion.Build() < osversion.RS3 {
return errors.New("named pipe mounts are not supported on this version of Windows")
}
configuration.MappedPipes = mps
if len(spec.Windows.Devices) > 0 {
@@ -315,9 +311,6 @@ func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions inter
if configuration.HvPartition {
return errors.New("device assignment is not supported for HyperV containers")
}
if osversion.Build() < osversion.RS5 {
return errors.New("device assignment requires Windows builds RS5 (17763+) or later")
}
for _, d := range spec.Windows.Devices {
configuration.AssignedDevices = append(configuration.AssignedDevices, hcsshim.AssignedDevice{InterfaceClassGUID: d.ID})
}

View File

@@ -155,43 +155,41 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
hnsEndpoint.Policies = append(hnsEndpoint.Policies, paPolicy)
if osversion.Build() > 16236 {
natPolicy, err := json.Marshal(hcsshim.PaPolicy{
Type: "OutBoundNAT",
})
natPolicy, err := json.Marshal(hcsshim.PaPolicy{
Type: "OutBoundNAT",
})
if err != nil {
return err
}
hnsEndpoint.Policies = append(hnsEndpoint.Policies, natPolicy)
epConnectivity, err := windows.ParseEndpointConnectivity(epOptions)
if err != nil {
return err
}
ep.portMapping = epConnectivity.PortBindings
ep.portMapping, err = windows.AllocatePorts(n.portMapper, ep.portMapping, ep.addr.IP)
if err != nil {
return err
}
defer func() {
if err != nil {
windows.ReleasePorts(n.portMapper, ep.portMapping)
}
}()
pbPolicy, err := windows.ConvertPortBindings(ep.portMapping)
if err != nil {
return err
}
hnsEndpoint.Policies = append(hnsEndpoint.Policies, pbPolicy...)
ep.disablegateway = true
if err != nil {
return err
}
hnsEndpoint.Policies = append(hnsEndpoint.Policies, natPolicy)
epConnectivity, err := windows.ParseEndpointConnectivity(epOptions)
if err != nil {
return err
}
ep.portMapping = epConnectivity.PortBindings
ep.portMapping, err = windows.AllocatePorts(n.portMapper, ep.portMapping, ep.addr.IP)
if err != nil {
return err
}
defer func() {
if err != nil {
windows.ReleasePorts(n.portMapper, ep.portMapping)
}
}()
pbPolicy, err := windows.ConvertPortBindings(ep.portMapping)
if err != nil {
return err
}
hnsEndpoint.Policies = append(hnsEndpoint.Policies, pbPolicy...)
ep.disablegateway = true
configurationb, err := json.Marshal(hnsEndpoint)
if err != nil {
return err

View File

@@ -21,7 +21,6 @@ import (
"sync"
"github.com/Microsoft/hcsshim"
"github.com/Microsoft/hcsshim/osversion"
"github.com/docker/docker/libnetwork/datastore"
"github.com/docker/docker/libnetwork/discoverapi"
"github.com/docker/docker/libnetwork/driverapi"
@@ -218,9 +217,6 @@ func (d *driver) parseNetworkOptions(id string, genericOptions map[string]string
}
config.VSID = uint(vsid)
case EnableOutboundNat:
if osversion.Build() <= 16236 {
return nil, fmt.Errorf("Invalid network option. OutboundNat is not supported on this OS version")
}
b, err := strconv.ParseBool(value)
if err != nil {
return nil, err

View File

@@ -4,7 +4,6 @@ import (
"net"
"github.com/Microsoft/hcsshim"
"github.com/Microsoft/hcsshim/osversion"
"github.com/sirupsen/logrus"
)
@@ -23,98 +22,96 @@ func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) {
vip := lb.vip
ingressPorts := lb.service.ingressPorts
if osversion.Build() > 16236 {
lb.Lock()
defer lb.Unlock()
//find the load balancer IP for the network.
var sourceVIP string
for _, e := range n.Endpoints() {
epInfo := e.Info()
if epInfo == nil {
continue
}
if epInfo.LoadBalancer() {
sourceVIP = epInfo.Iface().Address().IP.String()
break
}
lb.Lock()
defer lb.Unlock()
//find the load balancer IP for the network.
var sourceVIP string
for _, e := range n.Endpoints() {
epInfo := e.Info()
if epInfo == nil {
continue
}
if sourceVIP == "" {
logrus.Errorf("Failed to find load balancer IP for network %s", n.Name())
return
if epInfo.LoadBalancer() {
sourceVIP = epInfo.Iface().Address().IP.String()
break
}
}
var endpoints []hcsshim.HNSEndpoint
if sourceVIP == "" {
logrus.Errorf("Failed to find load balancer IP for network %s", n.Name())
return
}
for eid, be := range lb.backEnds {
if be.disabled {
continue
}
//Call HNS to get back ID (GUID) corresponding to the endpoint.
hnsEndpoint, err := hcsshim.GetHNSEndpointByName(eid)
if err != nil {
logrus.Errorf("Failed to find HNS ID for endpoint %v: %v", eid, err)
return
}
var endpoints []hcsshim.HNSEndpoint
endpoints = append(endpoints, *hnsEndpoint)
for eid, be := range lb.backEnds {
if be.disabled {
continue
}
if policies, ok := lbPolicylistMap[lb]; ok {
if policies.ilb != nil {
policies.ilb.Delete()
policies.ilb = nil
}
if policies.elb != nil {
policies.elb.Delete()
policies.elb = nil
}
delete(lbPolicylistMap, lb)
}
ilbPolicy, err := hcsshim.AddLoadBalancer(endpoints, true, sourceVIP, vip.String(), 0, 0, 0)
//Call HNS to get back ID (GUID) corresponding to the endpoint.
hnsEndpoint, err := hcsshim.GetHNSEndpointByName(eid)
if err != nil {
logrus.Errorf("Failed to add ILB policy for service %s (%s) with endpoints %v using load balancer IP %s on network %s: %v",
lb.service.name, vip.String(), endpoints, sourceVIP, n.Name(), err)
logrus.Errorf("Failed to find HNS ID for endpoint %v: %v", eid, err)
return
}
lbPolicylistMap[lb] = &policyLists{
ilb: ilbPolicy,
endpoints = append(endpoints, *hnsEndpoint)
}
if policies, ok := lbPolicylistMap[lb]; ok {
if policies.ilb != nil {
policies.ilb.Delete()
policies.ilb = nil
}
publishedPorts := make(map[uint32]uint32)
if policies.elb != nil {
policies.elb.Delete()
policies.elb = nil
}
delete(lbPolicylistMap, lb)
}
for i, port := range ingressPorts {
protocol := uint16(6)
ilbPolicy, err := hcsshim.AddLoadBalancer(endpoints, true, sourceVIP, vip.String(), 0, 0, 0)
if err != nil {
logrus.Errorf("Failed to add ILB policy for service %s (%s) with endpoints %v using load balancer IP %s on network %s: %v",
lb.service.name, vip.String(), endpoints, sourceVIP, n.Name(), err)
return
}
// Skip already published port
if publishedPorts[port.PublishedPort] == port.TargetPort {
continue
lbPolicylistMap[lb] = &policyLists{
ilb: ilbPolicy,
}
publishedPorts := make(map[uint32]uint32)
for i, port := range ingressPorts {
protocol := uint16(6)
// Skip already published port
if publishedPorts[port.PublishedPort] == port.TargetPort {
continue
}
if port.Protocol == ProtocolUDP {
protocol = 17
}
// check if already has udp matching to add wild card publishing
for j := i + 1; j < len(ingressPorts); j++ {
if ingressPorts[j].TargetPort == port.TargetPort &&
ingressPorts[j].PublishedPort == port.PublishedPort {
protocol = 0
}
}
if port.Protocol == ProtocolUDP {
protocol = 17
}
publishedPorts[port.PublishedPort] = port.TargetPort
// check if already has udp matching to add wild card publishing
for j := i + 1; j < len(ingressPorts); j++ {
if ingressPorts[j].TargetPort == port.TargetPort &&
ingressPorts[j].PublishedPort == port.PublishedPort {
protocol = 0
}
}
publishedPorts[port.PublishedPort] = port.TargetPort
lbPolicylistMap[lb].elb, err = hcsshim.AddLoadBalancer(endpoints, false, sourceVIP, "", protocol, uint16(port.TargetPort), uint16(port.PublishedPort))
if err != nil {
logrus.Errorf("Failed to add ELB policy for service %s (ip:%s target port:%v published port:%v) with endpoints %v using load balancer IP %s on network %s: %v",
lb.service.name, vip.String(), uint16(port.TargetPort), uint16(port.PublishedPort), endpoints, sourceVIP, n.Name(), err)
return
}
lbPolicylistMap[lb].elb, err = hcsshim.AddLoadBalancer(endpoints, false, sourceVIP, "", protocol, uint16(port.TargetPort), uint16(port.PublishedPort))
if err != nil {
logrus.Errorf("Failed to add ELB policy for service %s (ip:%s target port:%v published port:%v) with endpoints %v using load balancer IP %s on network %s: %v",
lb.service.name, vip.String(), uint16(port.TargetPort), uint16(port.PublishedPort), endpoints, sourceVIP, n.Name(), err)
return
}
}
}
@@ -124,30 +121,28 @@ func (n *network) rmLBBackend(ip net.IP, lb *loadBalancer, rmService bool, fullR
return
}
if osversion.Build() > 16236 {
if numEnabledBackends(lb) > 0 {
//Reprogram HNS (actually VFP) with the existing backends.
n.addLBBackend(ip, lb)
} else {
lb.Lock()
defer lb.Unlock()
logrus.Debugf("No more backends for service %s (ip:%s). Removing all policies", lb.service.name, lb.vip.String())
if numEnabledBackends(lb) > 0 {
// Reprogram HNS (actually VFP) with the existing backends.
n.addLBBackend(ip, lb)
} else {
lb.Lock()
defer lb.Unlock()
logrus.Debugf("No more backends for service %s (ip:%s). Removing all policies", lb.service.name, lb.vip.String())
if policyLists, ok := lbPolicylistMap[lb]; ok {
if policyLists.ilb != nil {
policyLists.ilb.Delete()
policyLists.ilb = nil
}
if policyLists.elb != nil {
policyLists.elb.Delete()
policyLists.elb = nil
}
delete(lbPolicylistMap, lb)
} else {
logrus.Errorf("Failed to find policies for service %s (%s)", lb.service.name, lb.vip.String())
if policyLists, ok := lbPolicylistMap[lb]; ok {
if policyLists.ilb != nil {
policyLists.ilb.Delete()
policyLists.ilb = nil
}
if policyLists.elb != nil {
policyLists.elb.Delete()
policyLists.elb = nil
}
delete(lbPolicylistMap, lb)
} else {
logrus.Errorf("Failed to find policies for service %s (%s)", lb.service.name, lb.vip.String())
}
}
}