// 2016-10-12 19:55:20 -04:00
package libnetwork
// 2017-08-29 02:35:31 -04:00
import (
"net"
	// 2016-10-12 19:55:20 -04:00
	// 2017-08-29 02:35:31 -04:00
"github.com/Microsoft/hcsshim"
	// 2020-08-31 14:18:32 -04:00
"github.com/Microsoft/hcsshim/osversion"
	// 2017-08-29 02:35:31 -04:00
"github.com/sirupsen/logrus"
)
// policyLists tracks the HNS policy lists programmed for one load
// balancer: ilb holds the internal (VIP) load-balancer policy and elb
// holds the external (published-port) policy most recently created.
type policyLists struct {
	ilb *hcsshim.PolicyList
	elb *hcsshim.PolicyList
}
2020-10-31 19:19:42 -04:00
var lbPolicylistMap = make ( map [ * loadBalancer ] * policyLists )
2017-08-29 02:35:31 -04:00
2018-04-10 00:36:19 -04:00
func ( n * network ) addLBBackend ( ip net . IP , lb * loadBalancer ) {
if len ( lb . vip ) == 0 {
return
}
vip := lb . vip
ingressPorts := lb . service . ingressPorts
2017-08-29 02:35:31 -04:00
2020-08-31 14:18:32 -04:00
if osversion . Build ( ) > 16236 {
2017-08-29 02:35:31 -04:00
lb . Lock ( )
defer lb . Unlock ( )
//find the load balancer IP for the network.
var sourceVIP string
for _ , e := range n . Endpoints ( ) {
epInfo := e . Info ( )
if epInfo == nil {
continue
}
if epInfo . LoadBalancer ( ) {
sourceVIP = epInfo . Iface ( ) . Address ( ) . IP . String ( )
break
}
}
if sourceVIP == "" {
logrus . Errorf ( "Failed to find load balancer IP for network %s" , n . Name ( ) )
return
}
var endpoints [ ] hcsshim . HNSEndpoint
Gracefully remove LB endpoints from services
This patch attempts to allow endpoints to complete servicing connections
while being removed from a service. The change adds a flag to the
endpoint.deleteServiceInfoFromCluster() method to indicate whether this
removal should fully remove connectivity through the load balancer
to the endpoint or should just disable directing further connections to
the endpoint. If the flag is 'false', then the load balancer assigns
a weight of 0 to the endpoint but does not remove it as a linux load
balancing destination. It does remove the endpoint as a docker load
balancing endpoint but tracks it in a special map of "disabled-but-not-
destroyed" load balancing endpoints. This allows traffic to continue
flowing, at least under Linux. If the flag is 'true', then the code
removes the endpoint entirely as a load balancing destination.
The sandbox.DisableService() method invokes deleteServiceInfoFromCluster()
with the flag sent to 'false', while the endpoint.sbLeave() method invokes
it with the flag set to 'true' to complete the removal on endpoint
finalization. Renaming the endpoint invokes deleteServiceInfoFromCluster()
with the flag set to 'true' because renaming attempts to completely
remove and then re-add each endpoint service entry.
The controller.rmServiceBinding() method, which carries out the operation,
similarly gets a new flag for whether to fully remove the endpoint. If
the flag is false, it does the job of moving the endpoint from the
load balancing set to the 'disabled' set. It then removes or
de-weights the entry in the OS load balancing table via
network.rmLBBackend(). It removes the service entirely via said method
ONLY IF there are no more live or disabled load balancing endpoints.
Similarly network.addLBBackend() requires slight tweaking to properly
manage the disabled set.
Finally, this change requires propagating the status of disabled
service endpoints via the networkDB. Accordingly, the patch includes
both code to generate and handle service update messages. It also
augments the service structure with a ServiceDisabled boolean to convey
whether an endpoint should ultimately be removed or just disabled.
This, naturally, required a rebuild of the protocol buffer code as well.
Signed-off-by: Chris Telfer <ctelfer@docker.com>
2018-02-14 17:04:23 -05:00
for eid , be := range lb . backEnds {
if be . disabled {
continue
}
2017-08-29 02:35:31 -04:00
//Call HNS to get back ID (GUID) corresponding to the endpoint.
hnsEndpoint , err := hcsshim . GetHNSEndpointByName ( eid )
if err != nil {
logrus . Errorf ( "Failed to find HNS ID for endpoint %v: %v" , eid , err )
return
}
endpoints = append ( endpoints , * hnsEndpoint )
}
if policies , ok := lbPolicylistMap [ lb ] ; ok {
if policies . ilb != nil {
policies . ilb . Delete ( )
policies . ilb = nil
}
if policies . elb != nil {
policies . elb . Delete ( )
policies . elb = nil
}
delete ( lbPolicylistMap , lb )
}
ilbPolicy , err := hcsshim . AddLoadBalancer ( endpoints , true , sourceVIP , vip . String ( ) , 0 , 0 , 0 )
if err != nil {
logrus . Errorf ( "Failed to add ILB policy for service %s (%s) with endpoints %v using load balancer IP %s on network %s: %v" ,
lb . service . name , vip . String ( ) , endpoints , sourceVIP , n . Name ( ) , err )
2017-09-07 13:36:11 -04:00
return
2017-08-29 02:35:31 -04:00
}
lbPolicylistMap [ lb ] = & policyLists {
ilb : ilbPolicy ,
}
publishedPorts := make ( map [ uint32 ] uint32 )
for i , port := range ingressPorts {
protocol := uint16 ( 6 )
// Skip already published port
if publishedPorts [ port . PublishedPort ] == port . TargetPort {
continue
}
if port . Protocol == ProtocolUDP {
protocol = 17
}
// check if already has udp matching to add wild card publishing
for j := i + 1 ; j < len ( ingressPorts ) ; j ++ {
if ingressPorts [ j ] . TargetPort == port . TargetPort &&
ingressPorts [ j ] . PublishedPort == port . PublishedPort {
protocol = 0
}
}
publishedPorts [ port . PublishedPort ] = port . TargetPort
lbPolicylistMap [ lb ] . elb , err = hcsshim . AddLoadBalancer ( endpoints , false , sourceVIP , "" , protocol , uint16 ( port . TargetPort ) , uint16 ( port . PublishedPort ) )
if err != nil {
logrus . Errorf ( "Failed to add ELB policy for service %s (ip:%s target port:%v published port:%v) with endpoints %v using load balancer IP %s on network %s: %v" ,
lb . service . name , vip . String ( ) , uint16 ( port . TargetPort ) , uint16 ( port . PublishedPort ) , endpoints , sourceVIP , n . Name ( ) , err )
return
}
}
}
2016-10-12 19:55:20 -04:00
}
2018-04-10 00:36:19 -04:00
func ( n * network ) rmLBBackend ( ip net . IP , lb * loadBalancer , rmService bool , fullRemove bool ) {
if len ( lb . vip ) == 0 {
return
}
2020-08-31 14:18:32 -04:00
if osversion . Build ( ) > 16236 {
Gracefully remove LB endpoints from services
This patch attempts to allow endpoints to complete servicing connections
while being removed from a service. The change adds a flag to the
endpoint.deleteServiceInfoFromCluster() method to indicate whether this
removal should fully remove connectivity through the load balancer
to the endpoint or should just disable directing further connections to
the endpoint. If the flag is 'false', then the load balancer assigns
a weight of 0 to the endpoint but does not remove it as a linux load
balancing destination. It does remove the endpoint as a docker load
balancing endpoint but tracks it in a special map of "disabled-but-not-
destroyed" load balancing endpoints. This allows traffic to continue
flowing, at least under Linux. If the flag is 'true', then the code
removes the endpoint entirely as a load balancing destination.
The sandbox.DisableService() method invokes deleteServiceInfoFromCluster()
with the flag sent to 'false', while the endpoint.sbLeave() method invokes
it with the flag set to 'true' to complete the removal on endpoint
finalization. Renaming the endpoint invokes deleteServiceInfoFromCluster()
with the flag set to 'true' because renaming attempts to completely
remove and then re-add each endpoint service entry.
The controller.rmServiceBinding() method, which carries out the operation,
similarly gets a new flag for whether to fully remove the endpoint. If
the flag is false, it does the job of moving the endpoint from the
load balancing set to the 'disabled' set. It then removes or
de-weights the entry in the OS load balancing table via
network.rmLBBackend(). It removes the service entirely via said method
ONLY IF there are no more live or disabled load balancing endpoints.
Similarly network.addLBBackend() requires slight tweaking to properly
manage the disabled set.
Finally, this change requires propagating the status of disabled
service endpoints via the networkDB. Accordingly, the patch includes
both code to generate and handle service update messages. It also
augments the service structure with a ServiceDisabled boolean to convey
whether an endpoint should ultimately be removed or just disabled.
This, naturally, required a rebuild of the protocol buffer code as well.
Signed-off-by: Chris Telfer <ctelfer@docker.com>
2018-02-14 17:04:23 -05:00
if numEnabledBackends ( lb ) > 0 {
2017-08-29 02:35:31 -04:00
//Reprogram HNS (actually VFP) with the existing backends.
2018-04-10 00:36:19 -04:00
n . addLBBackend ( ip , lb )
2017-08-29 02:35:31 -04:00
} else {
lb . Lock ( )
defer lb . Unlock ( )
logrus . Debugf ( "No more backends for service %s (ip:%s). Removing all policies" , lb . service . name , lb . vip . String ( ) )
if policyLists , ok := lbPolicylistMap [ lb ] ; ok {
if policyLists . ilb != nil {
policyLists . ilb . Delete ( )
policyLists . ilb = nil
}
if policyLists . elb != nil {
policyLists . elb . Delete ( )
policyLists . elb = nil
}
delete ( lbPolicylistMap , lb )
} else {
logrus . Errorf ( "Failed to find policies for service %s (%s)" , lb . service . name , lb . vip . String ( ) )
}
}
}
2016-10-12 19:55:20 -04:00
}
/*
Commit note (2018-02-14, Chris Telfer <ctelfer@docker.com>):
"Gracefully remove LB endpoints from services"

This patch attempts to allow endpoints to complete servicing connections
while being removed from a service. The change adds a flag to the
endpoint.deleteServiceInfoFromCluster() method to indicate whether this
removal should fully remove connectivity through the load balancer
to the endpoint or should just disable directing further connections to
the endpoint. If the flag is 'false', then the load balancer assigns
a weight of 0 to the endpoint but does not remove it as a linux load
balancing destination. It does remove the endpoint as a docker load
balancing endpoint but tracks it in a special map of "disabled-but-not-
destroyed" load balancing endpoints. This allows traffic to continue
flowing, at least under Linux. If the flag is 'true', then the code
removes the endpoint entirely as a load balancing destination.

The sandbox.DisableService() method invokes deleteServiceInfoFromCluster()
with the flag set to 'false', while the endpoint.sbLeave() method invokes
it with the flag set to 'true' to complete the removal on endpoint
finalization. Renaming the endpoint invokes deleteServiceInfoFromCluster()
with the flag set to 'true' because renaming attempts to completely
remove and then re-add each endpoint service entry.

The controller.rmServiceBinding() method, which carries out the operation,
similarly gets a new flag for whether to fully remove the endpoint. If
the flag is false, it does the job of moving the endpoint from the
load balancing set to the 'disabled' set. It then removes or
de-weights the entry in the OS load balancing table via
network.rmLBBackend(). It removes the service entirely via said method
ONLY IF there are no more live or disabled load balancing endpoints.
Similarly network.addLBBackend() requires slight tweaking to properly
manage the disabled set.

Finally, this change requires propagating the status of disabled
service endpoints via the networkDB. Accordingly, the patch includes
both code to generate and handle service update messages. It also
augments the service structure with a ServiceDisabled boolean to convey
whether an endpoint should ultimately be removed or just disabled.
This, naturally, required a rebuild of the protocol buffer code as well.
*/
// numEnabledBackends returns the number of lb's backends that are not
// disabled, i.e. those still eligible to receive new connections.
func numEnabledBackends(lb *loadBalancer) int {
	nEnabled := 0
	for _, be := range lb.backEnds {
		if !be.disabled {
			nEnabled++
		}
	}
	return nEnabled
}
2018-04-10 12:34:41 -04:00
func ( sb * sandbox ) populateLoadBalancers ( ep * endpoint ) {
2016-10-12 19:55:20 -04:00
}
// arrangeIngressFilterRule is a no-op stub in this (Windows)
// implementation; ingress filter rules are not arranged here.
func arrangeIngressFilterRule() {
}