// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: agent.proto
/*
Package libnetwork is a generated protocol buffer package.
It is generated from these files:
agent.proto
It has these top-level messages:
EndpointRecord
PortConfig
*/
package libnetwork
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type PortConfig_Protocol int32
const (
ProtocolTCP PortConfig_Protocol = 0
ProtocolUDP PortConfig_Protocol = 1
ProtocolSCTP PortConfig_Protocol = 2
)
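// Note (editorial, not generated): ProtocolTCP is the enum's zero value, so
// a PortConfig whose protocol field was never set on the wire decodes as TCP
// under proto3 semantics.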
var PortConfig_Protocol_name = map[int32]string{
0: "TCP",
1: "UDP",
2: "SCTP",
}
var PortConfig_Protocol_value = map[string]int32{
"TCP": 0,
"UDP": 1,
"SCTP": 2,
}
func (x PortConfig_Protocol) String() string {
return proto.EnumName(PortConfig_Protocol_name, int32(x))
}
func (PortConfig_Protocol) EnumDescriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 0} }
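// Illustrative sketch (not part of the generated code): the generated
// name/value maps support lookups in both directions. parseProtocol is a
// hypothetical helper, not an API of this package.
func parseProtocol(s string) (PortConfig_Protocol, error) {
	if v, ok := PortConfig_Protocol_value[s]; ok {
		return PortConfig_Protocol(v), nil
	}
	return 0, fmt.Errorf("unknown protocol %q", s)
}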
// EndpointRecord specifies all the endpoint-specific information that
// needs to be gossiped to nodes participating in the network.
type EndpointRecord struct {
// Name of the container
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Service name of the service to which this endpoint belongs.
ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
// Service ID of the service to which this endpoint belongs.
ServiceID string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
// Virtual IP of the service to which this endpoint belongs.
VirtualIP string `protobuf:"bytes,4,opt,name=virtual_ip,json=virtualIp,proto3" json:"virtual_ip,omitempty"`
// IP assigned to this endpoint.
EndpointIP string `protobuf:"bytes,5,opt,name=endpoint_ip,json=endpointIp,proto3" json:"endpoint_ip,omitempty"`
// IngressPorts exposed by the service to which this endpoint belongs.
IngressPorts []*PortConfig `protobuf:"bytes,6,rep,name=ingress_ports,json=ingressPorts" json:"ingress_ports,omitempty"`
// A list of aliases which are alternate names for the service
Aliases []string `protobuf:"bytes,7,rep,name=aliases" json:"aliases,omitempty"`
// List of task-specific aliases
TaskAliases []string `protobuf:"bytes,8,rep,name=task_aliases,json=taskAliases" json:"task_aliases,omitempty"`
// Whether this endpoint's service has been disabled
ServiceDisabled bool `protobuf:"varint,9,opt,name=service_disabled,json=serviceDisabled,proto3" json:"service_disabled,omitempty"`
}
func (m *EndpointRecord) Reset() { *m = EndpointRecord{} }
func (*EndpointRecord) ProtoMessage() {}
func (*EndpointRecord) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0} }
func (m *EndpointRecord) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *EndpointRecord) GetServiceName() string {
if m != nil {
return m.ServiceName
}
return ""
}
func (m *EndpointRecord) GetServiceID() string {
if m != nil {
return m.ServiceID
}
return ""
}
func (m *EndpointRecord) GetVirtualIP() string {
if m != nil {
return m.VirtualIP
}
return ""
}
func (m *EndpointRecord) GetEndpointIP() string {
if m != nil {
return m.EndpointIP
}
return ""
}
func (m *EndpointRecord) GetIngressPorts() []*PortConfig {
if m != nil {
return m.IngressPorts
}
return nil
}
func (m *EndpointRecord) GetAliases() []string {
if m != nil {
return m.Aliases
}
return nil
}
func (m *EndpointRecord) GetTaskAliases() []string {
if m != nil {
return m.TaskAliases
}
return nil
}
func (m *EndpointRecord) GetServiceDisabled() bool {
if m != nil {
return m.ServiceDisabled
}
return false
}
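// Illustrative sketch (not part of the generated code): an EndpointRecord
// as the libnetwork agent might gossip it for one service task. Every field
// value below is invented for the example.
func exampleEndpointRecord() *EndpointRecord {
	return &EndpointRecord{
		Name:        "web.1.example",
		ServiceName: "web",
		ServiceID:   "u2vv7dcchnhb",
		VirtualIP:   "10.0.0.2/24",
		EndpointIP:  "10.0.0.5/24",
		IngressPorts: []*PortConfig{{
			Protocol:      ProtocolTCP,
			TargetPort:    80,
			PublishedPort: 30080,
		}},
		Aliases:     []string{"web-alias"},
		TaskAliases: []string{"web.1"},
	}
}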
// PortConfig specifies an exposed port which can be
// addressed using the given name. This can later be queried
// using a service discovery API or a DNS SRV query. The node
// port specifies a port that can be used to address this
// service from outside the cluster by sending a connection
// request to this port on any node in the cluster.
type PortConfig struct {
// Name for the port. If provided the port information can
// be queried using the name as in a DNS SRV query.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Protocol for the port which is exposed.
Protocol PortConfig_Protocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=libnetwork.PortConfig_Protocol" json:"protocol,omitempty"`
// The port which the application is exposing and is bound to.
TargetPort uint32 `protobuf:"varint,3,opt,name=target_port,json=targetPort,proto3" json:"target_port,omitempty"`
// PublishedPort specifies the port on which the service is
// exposed on all nodes on the cluster. If not specified, an
// arbitrary port in the node port range is allocated by the
// system. If specified, it should be within the node port
// range and it should be available.
PublishedPort uint32 `protobuf:"varint,4,opt,name=published_port,json=publishedPort,proto3" json:"published_port,omitempty"`
}
func (m *PortConfig) Reset() { *m = PortConfig{} }
func (*PortConfig) ProtoMessage() {}
func (*PortConfig) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1} }
func (m *PortConfig) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *PortConfig) GetProtocol() PortConfig_Protocol {
if m != nil {
return m.Protocol
}
return ProtocolTCP
}
func (m *PortConfig) GetTargetPort() uint32 {
if m != nil {
return m.TargetPort
}
return 0
}
func (m *PortConfig) GetPublishedPort() uint32 {
if m != nil {
return m.PublishedPort
}
return 0
}
func init() {
proto.RegisterType((*EndpointRecord)(nil), "libnetwork.EndpointRecord")
proto.RegisterType((*PortConfig)(nil), "libnetwork.PortConfig")
proto.RegisterEnum("libnetwork.PortConfig_Protocol", PortConfig_Protocol_name, PortConfig_Protocol_value)
}
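// For reference (editorial note): init registers both message types and the
// protocol enum with the gogo/protobuf registry, which makes them resolvable
// by their fully qualified names (e.g. "libnetwork.EndpointRecord") at
// runtime.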
func (this *EndpointRecord) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 13)
s = append(s, "&libnetwork.EndpointRecord{")
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
s = append(s, "ServiceName: "+fmt.Sprintf("%#v", this.ServiceName)+",\n")
s = append(s, "ServiceID: "+fmt.Sprintf("%#v", this.ServiceID)+",\n")
s = append(s, "VirtualIP: "+fmt.Sprintf("%#v", this.VirtualIP)+",\n")
s = append(s, "EndpointIP: "+fmt.Sprintf("%#v", this.EndpointIP)+",\n")
if this.IngressPorts != nil {
s = append(s, "IngressPorts: "+fmt.Sprintf("%#v", this.IngressPorts)+",\n")
}
s = append(s, "Aliases: "+fmt.Sprintf("%#v", this.Aliases)+",\n")
s = append(s, "TaskAliases: "+fmt.Sprintf("%#v", this.TaskAliases)+",\n")
s = append(s, "ServiceDisabled: "+fmt.Sprintf("%#v", this.ServiceDisabled)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *PortConfig) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 8)
s = append(s, "&libnetwork.PortConfig{")
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
s = append(s, "Protocol: "+fmt.Sprintf("%#v", this.Protocol)+",\n")
s = append(s, "TargetPort: "+fmt.Sprintf("%#v", this.TargetPort)+",\n")
s = append(s, "PublishedPort: "+fmt.Sprintf("%#v", this.PublishedPort)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringAgent(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *EndpointRecord) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
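// Illustrative sketch (not part of the generated code): a marshal/unmarshal
// round trip using the generated methods in this file.
func roundTripEndpointRecord(in *EndpointRecord) (*EndpointRecord, error) {
	buf, err := in.Marshal()
	if err != nil {
		return nil, err
	}
	out := &EndpointRecord{}
	if err := out.Unmarshal(buf); err != nil {
		return nil, err
	}
	return out, nil
}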
func (m *EndpointRecord) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
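// Each field below is prefixed with a protobuf key byte of the form
// (field_number << 3) | wire_type: 0x0a is field 1 with wire type 2
// (length-delimited), 0x12 is field 2, and so on.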
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintAgent(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
if len(m.ServiceName) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintAgent(dAtA, i, uint64(len(m.ServiceName)))
i += copy(dAtA[i:], m.ServiceName)
}
if len(m.ServiceID) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintAgent(dAtA, i, uint64(len(m.ServiceID)))
i += copy(dAtA[i:], m.ServiceID)
}
if len(m.VirtualIP) > 0 {
dAtA[i] = 0x22
i++
i = encodeVarintAgent(dAtA, i, uint64(len(m.VirtualIP)))
i += copy(dAtA[i:], m.VirtualIP)
}
if len(m.EndpointIP) > 0 {
dAtA[i] = 0x2a
i++
i = encodeVarintAgent(dAtA, i, uint64(len(m.EndpointIP)))
i += copy(dAtA[i:], m.EndpointIP)
}
if len(m.IngressPorts) > 0 {
for _, msg := range m.IngressPorts {
dAtA[i] = 0x32
i++
i = encodeVarintAgent(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if len(m.Aliases) > 0 {
for _, s := range m.Aliases {
dAtA[i] = 0x3a
i++
l = len(s)
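// Inline base-128 varint of the string length; this is the same
// encoding encodeVarintAgent performs below.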
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
if len(m.TaskAliases) > 0 {
for _, s := range m.TaskAliases {
dAtA[i] = 0x42
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
if m.ServiceDisabled {
dAtA[i] = 0x48
i++
if m.ServiceDisabled {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
return i, nil
}
func (m *PortConfig) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *PortConfig) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintAgent(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
if m.Protocol != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintAgent(dAtA, i, uint64(m.Protocol))
}
if m.TargetPort != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintAgent(dAtA, i, uint64(m.TargetPort))
}
if m.PublishedPort != 0 {
dAtA[i] = 0x20
i++
i = encodeVarintAgent(dAtA, i, uint64(m.PublishedPort))
}
return i, nil
}
func encodeVarintAgent(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
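// Worked example (editorial note): for v = 300 (binary 1_0010_1100),
// encodeVarintAgent emits 0xAC then 0x02: the low seven bits with the
// continuation bit set, followed by the remaining bits.
func demoVarint() []byte {
	b := make([]byte, 2)
	n := encodeVarintAgent(b, 0, 300)
	return b[:n] // [0xAC, 0x02]
}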
func (m *EndpointRecord) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovAgent(uint64(l))
}
l = len(m.ServiceName)
if l > 0 {
n += 1 + l + sovAgent(uint64(l))
}
l = len(m.ServiceID)
if l > 0 {
n += 1 + l + sovAgent(uint64(l))
}
l = len(m.VirtualIP)
if l > 0 {
n += 1 + l + sovAgent(uint64(l))
}
l = len(m.EndpointIP)
if l > 0 {
n += 1 + l + sovAgent(uint64(l))
}
if len(m.IngressPorts) > 0 {
for _, e := range m.IngressPorts {
l = e.Size()
n += 1 + l + sovAgent(uint64(l))
}
}
if len(m.Aliases) > 0 {
for _, s := range m.Aliases {
l = len(s)
n += 1 + l + sovAgent(uint64(l))
}
}
if len(m.TaskAliases) > 0 {
for _, s := range m.TaskAliases {
l = len(s)
n += 1 + l + sovAgent(uint64(l))
}
}
if m.ServiceDisabled {
n += 2
}
return n
}
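// For reference (editorial note): Size mirrors MarshalTo field for field:
// one key byte per present field, plus a varint-encoded length and the
// payload for length-delimited fields, or two flat bytes (key plus 0/1)
// for the bool. Marshal relies on this to allocate its buffer exactly once.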
func (m *PortConfig) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovAgent(uint64(l))
}
if m.Protocol != 0 {
n += 1 + sovAgent(uint64(m.Protocol))
}
if m.TargetPort != 0 {
n += 1 + sovAgent(uint64(m.TargetPort))
}
if m.PublishedPort != 0 {
n += 1 + sovAgent(uint64(m.PublishedPort))
}
return n
}
func sovAgent(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozAgent(x uint64) (n int) {
return sovAgent(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
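// For reference (editorial note): sovAgent returns how many bytes the
// varint encoding of x occupies (e.g. sovAgent(300) == 2). sozAgent
// zigzag-encodes first, mapping 0, -1, 1, -2, ... to 0, 1, 2, 3, ... so
// that values of small magnitude stay short; no field in this file uses
// it, but the generator emits it regardless.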
func (this *EndpointRecord) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&EndpointRecord{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`,
`ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`,
`VirtualIP:` + fmt.Sprintf("%v", this.VirtualIP) + `,`,
`EndpointIP:` + fmt.Sprintf("%v", this.EndpointIP) + `,`,
`IngressPorts:` + strings.Replace(fmt.Sprintf("%v", this.IngressPorts), "PortConfig", "PortConfig", 1) + `,`,
`Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`,
`TaskAliases:` + fmt.Sprintf("%v", this.TaskAliases) + `,`,
`ServiceDisabled:` + fmt.Sprintf("%v", this.ServiceDisabled) + `,`,
`}`,
}, "")
return s
}
func (this *PortConfig) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&PortConfig{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
`TargetPort:` + fmt.Sprintf("%v", this.TargetPort) + `,`,
`PublishedPort:` + fmt.Sprintf("%v", this.PublishedPort) + `,`,
`}`,
}, "")
return s
}
func valueToStringAgent(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *EndpointRecord) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
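// Each iteration decodes one key varint and splits it into the field
// number (upper bits) and wire type (low three bits). Wire type 4
// (end-group) is invalid at the top level of a message.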
if wireType == 4 {
return fmt.Errorf("proto: EndpointRecord: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: EndpointRecord: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ServiceName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ServiceID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field VirtualIP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.VirtualIP = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EndpointIP", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.EndpointIP = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IngressPorts", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.IngressPorts = append(m.IngressPorts, &PortConfig{})
if err := m.IngressPorts[len(m.IngressPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Aliases = append(m.Aliases, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TaskAliases", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.TaskAliases = append(m.TaskAliases, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 9:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ServiceDisabled", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.ServiceDisabled = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipAgent(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAgent
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *PortConfig) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PortConfig: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PortConfig: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAgent
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
}
m.Protocol = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Protocol |= (PortConfig_Protocol(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType)
}
m.TargetPort = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.TargetPort |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field PublishedPort", wireType)
}
m.PublishedPort = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAgent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.PublishedPort |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipAgent(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAgent
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipAgent(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAgent
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAgent
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAgent
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthAgent
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAgent
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipAgent(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
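// Illustrative sketch (not part of the generated code): skipAgent gives the
// decoder forward compatibility. Unknown fields from a newer sender are
// measured and stepped over instead of failing the parse. Here field 15
// (key byte 0x78, varint wire type) is unknown to EndpointRecord and is
// silently skipped.
func demoSkipUnknownField() error {
	var r EndpointRecord
	return r.Unmarshal([]byte{0x78, 0x01}) // returns nil; field 15 is skipped
}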
var (
ErrInvalidLengthAgent = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowAgent = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("agent.proto", fileDescriptorAgent) }
var fileDescriptorAgent = []byte{
// 459 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x31, 0x6f, 0xd3, 0x4c,
0x18, 0xc7, 0xe3, 0xc4, 0x6f, 0x1b, 0x3f, 0x4e, 0x52, 0xeb, 0xf4, 0x0a, 0x59, 0x1e, 0x1c, 0x13,
0x09, 0x29, 0x48, 0x28, 0x95, 0xca, 0xd8, 0x89, 0x26, 0x0c, 0x5e, 0x90, 0x75, 0x4d, 0x59, 0x83,
0x13, 0x1f, 0xe6, 0x54, 0xe3, 0xb3, 0xee, 0xae, 0x65, 0x65, 0x03, 0xf5, 0x3b, 0x74, 0xe2, 0xcb,
0x30, 0x32, 0x32, 0x55, 0xd4, 0x9f, 0x80, 0x95, 0x0d, 0xdd, 0xf9, 0xae, 0x11, 0x52, 0xb7, 0xf3,
0xef, 0xff, 0x3b, 0xeb, 0xb9, 0xff, 0x03, 0x7e, 0x5e, 0x92, 0x5a, 0x2e, 0x1a, 0xce, 0x24, 0x43,
0x50, 0xd1, 0x6d, 0x4d, 0xe4, 0x27, 0xc6, 0x2f, 0xa3, 0xff, 0x4b, 0x56, 0x32, 0x8d, 0x8f, 0xd5,
0xa9, 0x33, 0x66, 0x7f, 0xfa, 0x30, 0x79, 0x5d, 0x17, 0x0d, 0xa3, 0xb5, 0xc4, 0x64, 0xc7, 0x78,
0x81, 0x10, 0xb8, 0x75, 0xfe, 0x91, 0x84, 0x4e, 0xe2, 0xcc, 0x3d, 0xac, 0xcf, 0xe8, 0x29, 0x8c,
0x04, 0xe1, 0xd7, 0x74, 0x47, 0x36, 0x3a, 0xeb, 0xeb, 0xcc, 0x37, 0xec, 0x8d, 0x52, 0x5e, 0x00,
0x58, 0x85, 0x16, 0xe1, 0x40, 0x09, 0x67, 0xe3, 0xf6, 0x6e, 0xea, 0x9d, 0x77, 0x34, 0x5d, 0x61,
0xcf, 0x08, 0x69, 0xa1, 0xec, 0x6b, 0xca, 0xe5, 0x55, 0x5e, 0x6d, 0x68, 0x13, 0xba, 0x7b, 0xfb,
0x6d, 0x47, 0xd3, 0x0c, 0x7b, 0x46, 0x48, 0x1b, 0x74, 0x0c, 0x3e, 0x31, 0x43, 0x2a, 0xfd, 0x3f,
0xad, 0x4f, 0xda, 0xbb, 0x29, 0xd8, 0xd9, 0xd3, 0x0c, 0x83, 0x55, 0xd2, 0x06, 0x9d, 0xc2, 0x98,
0xd6, 0x25, 0x27, 0x42, 0x6c, 0x1a, 0xc6, 0xa5, 0x08, 0x0f, 0x92, 0xc1, 0xdc, 0x3f, 0x79, 0xb2,
0xd8, 0x17, 0xb2, 0xc8, 0x18, 0x97, 0x4b, 0x56, 0xbf, 0xa7, 0x25, 0x1e, 0x19, 0x59, 0x21, 0x81,
0x42, 0x38, 0xcc, 0x2b, 0x9a, 0x0b, 0x22, 0xc2, 0xc3, 0x64, 0x30, 0xf7, 0xb0, 0xfd, 0x54, 0x35,
0xc8, 0x5c, 0x5c, 0x6e, 0x6c, 0x3c, 0xd4, 0xb1, 0xaf, 0xd8, 0x2b, 0xa3, 0x3c, 0x87, 0xc0, 0xd6,
0x50, 0x50, 0x91, 0x6f, 0x2b, 0x52, 0x84, 0x5e, 0xe2, 0xcc, 0x87, 0xf8, 0xc8, 0xf0, 0x95, 0xc1,
0xb3, 0x2f, 0x7d, 0x80, 0xfd, 0x10, 0x8f, 0xf6, 0x7e, 0x0a, 0x43, 0xbd, 0xa7, 0x1d, 0xab, 0x74,
0xe7, 0x93, 0x93, 0xe9, 0xe3, 0x4f, 0x58, 0x64, 0x46, 0xc3, 0x0f, 0x17, 0xd0, 0x14, 0x7c, 0x99,
0xf3, 0x92, 0x48, 0xdd, 0x81, 0x5e, 0xc9, 0x18, 0x43, 0x87, 0xd4, 0x4d, 0xf4, 0x0c, 0x26, 0xcd,
0xd5, 0xb6, 0xa2, 0xe2, 0x03, 0x29, 0x3a, 0xc7, 0xd5, 0xce, 0xf8, 0x81, 0x2a, 0x6d, 0xf6, 0x0e,
0x86, 0xf6, 0xef, 0x28, 0x84, 0xc1, 0x7a, 0x99, 0x05, 0xbd, 0xe8, 0xe8, 0xe6, 0x36, 0xf1, 0x2d,
0x5e, 0x2f, 0x33, 0x95, 0x5c, 0xac, 0xb2, 0xc0, 0xf9, 0x37, 0xb9, 0x58, 0x65, 0x28, 0x02, 0xf7,
0x7c, 0xb9, 0xce, 0x82, 0x7e, 0x14, 0xdc, 0xdc, 0x26, 0x23, 0x1b, 0x29, 0x16, 0xb9, 0x5f, 0xbf,
0xc5, 0xbd, 0xb3, 0xf0, 0xe7, 0x7d, 0xdc, 0xfb, 0x7d, 0x1f, 0x3b, 0x9f, 0xdb, 0xd8, 0xf9, 0xde,
0xc6, 0xce, 0x8f, 0x36, 0x76, 0x7e, 0xb5, 0xb1, 0xb3, 0x3d, 0xd0, 0xaf, 0x79, 0xf9, 0x37, 0x00,
0x00, 0xff, 0xff, 0x55, 0x29, 0x75, 0x5c, 0xd7, 0x02, 0x00, 0x00,
}