mirror of
https://github.com/moby/moby.git
synced 2022-11-09 12:21:53 -05:00
1b3fef5333
Windows Server 2016 (RS1) reached end of support, and Docker Desktop requires
Windows 10 V19H2 (version 1909, build 18363) as a minimum.
This patch makes Windows Server RS5 / ltsc2019 (build 17763) the minimum version
to run the daemon, and removes some hacks for older versions of Windows.
There is one check remaining that checks for Windows RS3 for a workaround
on older versions, but recent changes in Windows seem to have regressed
on the same issue, so I kept that code for now to check whether we may need that
workaround again;
085c6a98d5/daemon/graphdriver/windows/windows.go (L319-L341)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
164 lines
3.8 KiB
Go
164 lines
3.8 KiB
Go
package libnetwork
|
|
|
|
import (
|
|
"net"
|
|
|
|
"github.com/Microsoft/hcsshim"
|
|
"github.com/sirupsen/logrus"
|
|
)
|
|
|
|
// policyLists holds the HNS policies programmed for a single load balancer,
// so they can be deleted when the backend set changes or the service is removed.
type policyLists struct {
	// ilb is the internal load-balancer policy (east-west traffic to the service VIP).
	ilb *hcsshim.PolicyList
	// elb is the external load-balancer policy (published/ingress ports).
	elb *hcsshim.PolicyList
}
|
|
|
|
// lbPolicylistMap maps each load balancer to the HNS policies currently
// programmed for it, so addLBBackend/rmLBBackend can delete them later.
// NOTE(review): access appears to be guarded only by the per-lb lock taken
// in addLBBackend/rmLBBackend; concurrent calls for *different* load
// balancers would race on this map — verify against the callers.
var lbPolicylistMap = make(map[*loadBalancer]*policyLists)
|
|
|
|
// addLBBackend (re)programs the HNS load-balancer policies for lb on this
// network. It is called whenever the backend set changes: any previously
// programmed ILB/ELB policies for lb are deleted and fresh ones are added
// covering the currently enabled backends.
//
// The function is best-effort: on any failure it logs an error and returns,
// leaving lbPolicylistMap in whatever state was reached.
func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) {
	// Nothing to program for a service without a VIP.
	if len(lb.vip) == 0 {
		return
	}

	vip := lb.vip
	ingressPorts := lb.service.ingressPorts

	// Hold the load-balancer lock for the whole (re)programming sequence so
	// concurrent add/remove calls on the same lb cannot interleave.
	lb.Lock()
	defer lb.Unlock()
	// Find the load balancer IP for the network: the address of the endpoint
	// flagged as the load-balancer endpoint, used as the source VIP for the
	// HNS policies added below.
	var sourceVIP string
	for _, e := range n.Endpoints() {
		epInfo := e.Info()
		if epInfo == nil {
			continue
		}
		if epInfo.LoadBalancer() {
			sourceVIP = epInfo.Iface().Address().IP.String()
			break
		}
	}

	if sourceVIP == "" {
		logrus.Errorf("Failed to find load balancer IP for network %s", n.Name())
		return
	}

	// Resolve each enabled backend to its HNS endpoint (HNS identifies
	// endpoints by GUID, which GetHNSEndpointByName looks up for us).
	var endpoints []hcsshim.HNSEndpoint

	for eid, be := range lb.backEnds {
		if be.disabled {
			continue
		}
		// Call HNS to get back ID (GUID) corresponding to the endpoint.
		hnsEndpoint, err := hcsshim.GetHNSEndpointByName(eid)
		if err != nil {
			logrus.Errorf("Failed to find HNS ID for endpoint %v: %v", eid, err)
			return
		}

		endpoints = append(endpoints, *hnsEndpoint)
	}

	// Delete any previously programmed policies for this lb before re-adding;
	// the policies are replaced wholesale rather than updated in place.
	if policies, ok := lbPolicylistMap[lb]; ok {

		if policies.ilb != nil {
			policies.ilb.Delete()
			policies.ilb = nil
		}

		if policies.elb != nil {
			policies.elb.Delete()
			policies.elb = nil
		}
		delete(lbPolicylistMap, lb)
	}

	// Internal load balancing: traffic addressed to the service VIP.
	ilbPolicy, err := hcsshim.AddLoadBalancer(endpoints, true, sourceVIP, vip.String(), 0, 0, 0)
	if err != nil {
		logrus.Errorf("Failed to add ILB policy for service %s (%s) with endpoints %v using load balancer IP %s on network %s: %v",
			lb.service.name, vip.String(), endpoints, sourceVIP, n.Name(), err)
		return
	}

	lbPolicylistMap[lb] = &policyLists{
		ilb: ilbPolicy,
	}

	// Track published->target port pairs already programmed so duplicate
	// entries in ingressPorts do not produce duplicate ELB policies.
	publishedPorts := make(map[uint32]uint32)

	for i, port := range ingressPorts {
		protocol := uint16(6) // IANA protocol number 6 = TCP (default)

		// Skip already published port
		if publishedPorts[port.PublishedPort] == port.TargetPort {
			continue
		}

		if port.Protocol == ProtocolUDP {
			protocol = 17 // IANA protocol number 17 = UDP
		}

		// check if already has udp matching to add wild card publishing:
		// a later entry with the same port pair means both TCP and UDP are
		// published, so program protocol 0 (any) once instead.
		for j := i + 1; j < len(ingressPorts); j++ {
			if ingressPorts[j].TargetPort == port.TargetPort &&
				ingressPorts[j].PublishedPort == port.PublishedPort {
				protocol = 0
			}
		}

		publishedPorts[port.PublishedPort] = port.TargetPort

		// NOTE(review): elb is overwritten on every iteration, so only the
		// policy for the last ingress port is retained for later deletion;
		// earlier ELB policies appear to be left behind when this lb is
		// removed or reprogrammed — verify whether that leak is intended.
		lbPolicylistMap[lb].elb, err = hcsshim.AddLoadBalancer(endpoints, false, sourceVIP, "", protocol, uint16(port.TargetPort), uint16(port.PublishedPort))
		if err != nil {
			logrus.Errorf("Failed to add ELB policy for service %s (ip:%s target port:%v published port:%v) with endpoints %v using load balancer IP %s on network %s: %v",
				lb.service.name, vip.String(), uint16(port.TargetPort), uint16(port.PublishedPort), endpoints, sourceVIP, n.Name(), err)
			return
		}
	}
}
|
|
|
|
func (n *network) rmLBBackend(ip net.IP, lb *loadBalancer, rmService bool, fullRemove bool) {
|
|
if len(lb.vip) == 0 {
|
|
return
|
|
}
|
|
|
|
if numEnabledBackends(lb) > 0 {
|
|
// Reprogram HNS (actually VFP) with the existing backends.
|
|
n.addLBBackend(ip, lb)
|
|
} else {
|
|
lb.Lock()
|
|
defer lb.Unlock()
|
|
logrus.Debugf("No more backends for service %s (ip:%s). Removing all policies", lb.service.name, lb.vip.String())
|
|
|
|
if policyLists, ok := lbPolicylistMap[lb]; ok {
|
|
if policyLists.ilb != nil {
|
|
policyLists.ilb.Delete()
|
|
policyLists.ilb = nil
|
|
}
|
|
|
|
if policyLists.elb != nil {
|
|
policyLists.elb.Delete()
|
|
policyLists.elb = nil
|
|
}
|
|
delete(lbPolicylistMap, lb)
|
|
|
|
} else {
|
|
logrus.Errorf("Failed to find policies for service %s (%s)", lb.service.name, lb.vip.String())
|
|
}
|
|
}
|
|
}
|
|
|
|
func numEnabledBackends(lb *loadBalancer) int {
|
|
nEnabled := 0
|
|
for _, be := range lb.backEnds {
|
|
if !be.disabled {
|
|
nEnabled++
|
|
}
|
|
}
|
|
return nEnabled
|
|
}
|
|
|
|
// populateLoadBalancers is a no-op in this implementation; load-balancer
// policies are programmed through HNS in addLBBackend rather than inside
// the sandbox. The method exists for interface parity with other platforms.
func (sb *sandbox) populateLoadBalancers(ep *endpoint) {
}
|
|
|
|
// arrangeIngressFilterRule is a no-op in this implementation, presumably
// kept as a stub for interface parity with the Linux ingress code — verify
// against the cross-platform callers.
func arrangeIngressFilterRule() {
}
|