moby--moby/libnetwork/service_windows.go
Chris Telfer ea2fa20859 Add endpoint load-balancing mode
This is the heart of the scalability change for services in libnetwork.
The present routing mesh adds load-balancing rules for a network to
every container connected to the network.  This newer approach instead
creates a single load-balancing endpoint per network per node.  For
every service on a network, libnetwork assigns the VIP of the service to
the endpoint's interface as an alias.  This endpoint must have a unique
IP address in order to route return traffic to it.  Traffic destined for
a service's VIP arrives at the load-balancing endpoint on the VIP, and
from there Linux load-balances it among the backend destinations while
SNATing the traffic to the endpoint's unique IP address.
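
As a rough sketch of the bookkeeping this implies (illustrative only;
the type and field names below are not part of this patch):

    // One load-balancing endpoint per network per node.  Each service's
    // VIP on the network becomes an alias on this endpoint's interface.
    type lbEndpointSketch struct {
        endpointIP  net.IP   // unique per-node IP; traffic is SNATed to it
        serviceVIPs []net.IP // one VIP alias per service on the network
    }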

The net result of this scheme is that each node in a swarm need only
have one set of load-balancing state per service instead of one per
container on the node.  This scheme is very similar to how services
currently operate on Windows nodes in libnetwork.  As on Windows nodes,
it costs extra IP addresses in a network (one per node) and an extra
network hop in the stack, although that hop always occurs in the network
stack local to the container.
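
For example, a node with 50 containers attached to a network previously
carried 50 copies of that network's load-balancing rules (one per
container); under this scheme it carries a single copy on the
load-balancing endpoint.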

To prevent existing deployments from suddenly failing because their
networks lack sufficient address space for per-node load-balancing
endpoint IP addresses, this patch preserves the existing functionality
and activates the new functionality on a per-network basis, depending on
whether the network has a load-balancing endpoint.  Eventually, moby
should always set this option when creating new networks and should only
omit it for networks created as part of a swarm that are not marked to
use endpoint load balancing.
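
The per-network check appears in addLBBackend below; pulled out as a
hypothetical helper (hasLoadBalancerEndpoint is not a name from this
patch), it would look roughly like:

    // hasLoadBalancerEndpoint reports whether any endpoint on the
    // network is the node's load-balancing endpoint.
    func hasLoadBalancerEndpoint(n *network) bool {
        for _, e := range n.Endpoints() {
            if epInfo := e.Info(); epInfo != nil && epInfo.LoadBalancer() {
                return true
            }
        }
        return false
    }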

This patch also normalizes the code to treat "load" and "balancer" as
two separate words for the purposes of variable and function naming.
This means that the 'B' in "Balancer" is capitalized in mixed-case
identifiers (as in loadBalancer).

Signed-off-by: Chris Telfer <ctelfer@docker.com>
2018-06-28 12:08:18 -04:00

package libnetwork

import (
	"net"

	"github.com/Microsoft/hcsshim"
	"github.com/docker/docker/pkg/system"
	"github.com/sirupsen/logrus"
)

// policyLists holds the HNS policies programmed for a load balancer: an
// internal (ILB) policy for the service VIP and an external (ELB)
// policy for published ports.
type policyLists struct {
	ilb *hcsshim.PolicyList
	elb *hcsshim.PolicyList
}

var lbPolicylistMap map[*loadBalancer]*policyLists

func init() {
	lbPolicylistMap = make(map[*loadBalancer]*policyLists)
}
func (n *network) addLBBackend(ip net.IP, lb *loadBalancer) {
	if len(lb.vip) == 0 {
		return
	}

	vip := lb.vip
	ingressPorts := lb.service.ingressPorts

	// HNS load-balancing policies require a sufficiently recent Windows build.
	if system.GetOSVersion().Build > 16236 {
		lb.Lock()
		defer lb.Unlock()

		// Find the load-balancing endpoint's IP for the network.
		var sourceVIP string
		for _, e := range n.Endpoints() {
			epInfo := e.Info()
			if epInfo == nil {
				continue
			}
			if epInfo.LoadBalancer() {
				sourceVIP = epInfo.Iface().Address().IP.String()
				break
			}
		}

		if sourceVIP == "" {
			logrus.Errorf("Failed to find load balancer IP for network %s", n.Name())
			return
		}

		var endpoints []hcsshim.HNSEndpoint
		for eid, be := range lb.backEnds {
			if be.disabled {
				continue
			}
			// Call HNS to get the ID (GUID) corresponding to the endpoint.
			hnsEndpoint, err := hcsshim.GetHNSEndpointByName(eid)
			if err != nil {
				logrus.Errorf("Failed to find HNS ID for endpoint %v: %v", eid, err)
				return
			}
			endpoints = append(endpoints, *hnsEndpoint)
		}
		// Remove any stale policies for this load balancer before
		// programming the new ones.
		if policies, ok := lbPolicylistMap[lb]; ok {
			if policies.ilb != nil {
				policies.ilb.Delete()
				policies.ilb = nil
			}
			if policies.elb != nil {
				policies.elb.Delete()
				policies.elb = nil
			}
			delete(lbPolicylistMap, lb)
		}

		// Program the ILB policy: traffic to the VIP is balanced across
		// the backend endpoints and sourced from the load-balancing
		// endpoint's IP.
		ilbPolicy, err := hcsshim.AddLoadBalancer(endpoints, true, sourceVIP, vip.String(), 0, 0, 0)
		if err != nil {
			logrus.Errorf("Failed to add ILB policy for service %s (%s) with endpoints %v using load balancer IP %s on network %s: %v",
				lb.service.name, vip.String(), endpoints, sourceVIP, n.Name(), err)
			return
		}

		lbPolicylistMap[lb] = &policyLists{
			ilb: ilbPolicy,
		}
		publishedPorts := make(map[uint32]uint32)

		for i, port := range ingressPorts {
			protocol := uint16(6) // IANA protocol number for TCP

			// Skip a port that has already been published.
			if publishedPorts[port.PublishedPort] == port.TargetPort {
				continue
			}

			if port.Protocol == ProtocolUDP {
				protocol = 17 // IANA protocol number for UDP
			}

			// If the same published/target port pair appears again later
			// (i.e. for both TCP and UDP), publish with a wildcard protocol.
			for j := i + 1; j < len(ingressPorts); j++ {
				if ingressPorts[j].TargetPort == port.TargetPort &&
					ingressPorts[j].PublishedPort == port.PublishedPort {
					protocol = 0
				}
			}

			publishedPorts[port.PublishedPort] = port.TargetPort

			lbPolicylistMap[lb].elb, err = hcsshim.AddLoadBalancer(endpoints, false, sourceVIP, "", protocol, uint16(port.TargetPort), uint16(port.PublishedPort))
			if err != nil {
				logrus.Errorf("Failed to add ELB policy for service %s (ip:%s target port:%v published port:%v) with endpoints %v using load balancer IP %s on network %s: %v",
					lb.service.name, vip.String(), uint16(port.TargetPort), uint16(port.PublishedPort), endpoints, sourceVIP, n.Name(), err)
				return
			}
		}
	}
}
func (n *network) rmLBBackend(ip net.IP, lb *loadBalancer, rmService bool, fullRemove bool) {
	if len(lb.vip) == 0 {
		return
	}

	if system.GetOSVersion().Build > 16236 {
		if numEnabledBackends(lb) > 0 {
			// Reprogram HNS (actually VFP) with the remaining enabled backends.
			n.addLBBackend(ip, lb)
		} else {
			lb.Lock()
			defer lb.Unlock()

			logrus.Debugf("No more backends for service %s (ip:%s). Removing all policies", lb.service.name, lb.vip.String())

			if policyLists, ok := lbPolicylistMap[lb]; ok {
				if policyLists.ilb != nil {
					policyLists.ilb.Delete()
					policyLists.ilb = nil
				}
				if policyLists.elb != nil {
					policyLists.elb.Delete()
					policyLists.elb = nil
				}
				delete(lbPolicylistMap, lb)
			} else {
				logrus.Errorf("Failed to find policies for service %s (%s)", lb.service.name, lb.vip.String())
			}
		}
	}
}
// numEnabledBackends returns the count of backends that are not disabled.
func numEnabledBackends(lb *loadBalancer) int {
	nEnabled := 0
	for _, be := range lb.backEnds {
		if !be.disabled {
			nEnabled++
		}
	}
	return nEnabled
}

// populateLoadBalancers is a no-op on Windows: load balancing is
// programmed through HNS policies rather than inside the sandbox.
func (sb *sandbox) populateLoadBalancers(ep *endpoint) {
}

// arrangeIngressFilterRule is a no-op on Windows.
func arrangeIngressFilterRule() {
}