1
0
Fork 0
mirror of https://github.com/moby/moby.git synced 2022-11-09 12:21:53 -05:00

Merge pull request #36538 from ctelfer/ingress-fix

Fix automatic removal of ingress sandbox when last service leaves
This commit is contained in:
Sebastiaan van Stijn 2018-03-13 20:56:03 +01:00 committed by GitHub
commit 514fb6cf85
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
18 changed files with 248 additions and 42 deletions

View file

@ -222,6 +222,8 @@ func (daemon *Daemon) releaseIngress(id string) {
return
}
daemon.deleteLoadBalancerSandbox(n)
if err := n.Delete(); err != nil {
logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err)
return

View file

@ -36,6 +36,9 @@ func TestServiceWithPredefinedNetwork(t *testing.T) {
if runtime.GOARCH == "arm64" || runtime.GOARCH == "arm" {
config.Timeout = 50 * time.Second
config.Delay = 100 * time.Millisecond
} else {
config.Timeout = 30 * time.Second
config.Delay = 100 * time.Millisecond
}
}
@ -53,6 +56,72 @@ func TestServiceWithPredefinedNetwork(t *testing.T) {
}
const ingressNet = "ingress"
// TestServiceWithIngressNetwork creates a swarm service attached to the
// "ingress" network with a published port, removes the service, and then
// verifies that the ingress network itself survives intact: the
// load-balancer sandbox ("ingress-sbox") and the peer list must still be
// present after the last service leaves.
func TestServiceWithIngressNetwork(t *testing.T) {
	defer setupTest(t)()
	d := newSwarm(t)
	defer d.Stop(t)

	apiClient, err := client.NewClientWithOpts(client.WithHost((d.Sock())))
	require.NoError(t, err)

	pollCfg := func(config *poll.Settings) {
		// Slower architectures need a longer timeout; the delay between
		// polls is the same everywhere.
		config.Delay = 100 * time.Millisecond
		if runtime.GOARCH == "arm64" || runtime.GOARCH == "arm" {
			config.Timeout = 50 * time.Second
		} else {
			config.Timeout = 30 * time.Second
		}
	}

	// Wait until the ingress network is fully set up before using it.
	poll.WaitOn(t, swarmIngressReady(apiClient), pollCfg)

	var replicas uint64 = 1
	spec := swarmServiceSpec("TestIngressService", replicas)
	spec.TaskTemplate.Networks = append(spec.TaskTemplate.Networks, swarm.NetworkAttachmentConfig{Target: ingressNet})
	spec.EndpointSpec = &swarm.EndpointSpec{
		Ports: []swarm.PortConfig{
			{
				Protocol:    swarm.PortConfigProtocolTCP,
				TargetPort:  80,
				PublishMode: swarm.PortConfigPublishModeIngress,
			},
		},
	}

	created, err := apiClient.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{
		QueryRegistry: false,
	})
	require.NoError(t, err)

	id := created.ID
	poll.WaitOn(t, serviceRunningCount(apiClient, id, replicas), pollCfg)

	_, _, err = apiClient.ServiceInspectWithRaw(context.Background(), id, types.ServiceInspectOptions{})
	require.NoError(t, err)

	err = apiClient.ServiceRemove(context.Background(), id)
	require.NoError(t, err)

	poll.WaitOn(t, serviceIsRemoved(apiClient, id), pollCfg)
	poll.WaitOn(t, noTasks(apiClient), pollCfg)

	// Ensure that "ingress" is not removed or corrupted
	time.Sleep(10 * time.Second)
	netInfo, err := apiClient.NetworkInspect(context.Background(), ingressNet, types.NetworkInspectOptions{
		Verbose: true,
		Scope:   "swarm",
	})
	require.NoError(t, err, "Ingress network was removed after removing service!")
	require.NotZero(t, len(netInfo.Containers), "No load balancing endpoints in ingress network")
	require.NotZero(t, len(netInfo.Peers), "No peers (including self) in ingress network")

	_, ok := netInfo.Containers["ingress-sbox"]
	require.True(t, ok, "ingress-sbox not present in ingress network")
}
func serviceRunningCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result {
return func(log poll.LogT) poll.Result {
filter := filters.NewArgs()
@ -68,3 +137,25 @@ func serviceRunningCount(client client.ServiceAPIClient, serviceID string, insta
return poll.Success()
}
}
// swarmIngressReady returns a poll check that succeeds once the ingress
// network is fully operational: it must report at least one peer, at least
// one attached container, and the load-balancer sandbox ("ingress-sbox")
// among its containers.
func swarmIngressReady(client client.NetworkAPIClient) func(log poll.LogT) poll.Result {
	return func(log poll.LogT) poll.Result {
		netInfo, err := client.NetworkInspect(context.Background(), ingressNet, types.NetworkInspectOptions{
			Verbose: true,
			Scope:   "swarm",
		})
		if err != nil {
			return poll.Error(err)
		}
		np := len(netInfo.Peers)
		nc := len(netInfo.Containers)
		if np == 0 || nc == 0 {
			// Arguments must follow the format string: peers first,
			// then containers (the original passed them swapped).
			return poll.Continue("ingress not ready: %d peers and %d containers", np, nc)
		}
		_, ok := netInfo.Containers["ingress-sbox"]
		if !ok {
			return poll.Continue("ingress not ready: does not contain the ingress-sbox")
		}
		return poll.Success()
	}
}

View file

@ -34,7 +34,7 @@ github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
#get libnetwork packages
# When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy accordingly
github.com/docker/libnetwork ed2130d117c11c542327b4d5216a5db36770bc65
github.com/docker/libnetwork 3aca383eb555510f3f17696f9505f7bfbd25f0e5
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec

View file

@ -10,6 +10,7 @@ import (
"github.com/docker/libkv/store"
"github.com/docker/libnetwork/cluster"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/ipamutils"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/osl"
"github.com/sirupsen/logrus"
@ -40,6 +41,7 @@ type DaemonCfg struct {
DriverCfg map[string]interface{}
ClusterProvider cluster.Provider
NetworkControlPlaneMTU int
DefaultAddressPool []*ipamutils.NetworkToSplit
}
// ClusterCfg represents cluster configuration
@ -110,6 +112,13 @@ func OptionDefaultDriver(dd string) Option {
}
}
// OptionDefaultAddressPoolConfig returns an option setter that records the
// user-supplied default address pool in the daemon configuration. The pool is
// later handed to the built-in IPAM driver during controller initialization.
func OptionDefaultAddressPoolConfig(addressPool []*ipamutils.NetworkToSplit) Option {
	return func(cfg *Config) {
		cfg.Daemon.DefaultAddressPool = addressPool
	}
}
// OptionDriverConfig returns an option setter for driver configuration.
func OptionDriverConfig(networkType string, config map[string]interface{}) Option {
return func(c *Config) {

View file

@ -222,7 +222,7 @@ func New(cfgOptions ...config.Option) (NetworkController, error) {
}
}
if err = initIPAMDrivers(drvRegistry, nil, c.getStore(datastore.GlobalScope)); err != nil {
if err = initIPAMDrivers(drvRegistry, nil, c.getStore(datastore.GlobalScope), c.cfg.Daemon.DefaultAddressPool); err != nil {
return nil, err
}

View file

@ -782,7 +782,9 @@ func (d *driver) deleteNetwork(nid string) error {
logrus.Warn(err)
}
if link, err := d.nlh.LinkByName(ep.srcName); err == nil {
d.nlh.LinkDel(link)
if err := d.nlh.LinkDel(link); err != nil {
logrus.WithError(err).Errorf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id)
}
}
if err := d.storeDelete(ep); err != nil {
@ -969,7 +971,9 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
}
defer func() {
if err != nil {
d.nlh.LinkDel(host)
if err := d.nlh.LinkDel(host); err != nil {
logrus.WithError(err).Warnf("Failed to delete host side interface (%s)'s link", hostIfName)
}
}
}()
@ -980,7 +984,9 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
}
defer func() {
if err != nil {
d.nlh.LinkDel(sbox)
if err := d.nlh.LinkDel(sbox); err != nil {
logrus.WithError(err).Warnf("Failed to delete sandbox side interface (%s)'s link", containerIfName)
}
}
}()
@ -1117,7 +1123,9 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {
// Try removal of link. Discard error: it is a best effort.
// Also make sure defer does not see this error either.
if link, err := d.nlh.LinkByName(ep.srcName); err == nil {
d.nlh.LinkDel(link)
if err := d.nlh.LinkDel(link); err != nil {
logrus.WithError(err).Errorf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id)
}
}
if err := d.storeDelete(ep); err != nil {

View file

@ -76,7 +76,9 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {
return fmt.Errorf("endpoint id %q not found", eid)
}
if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil {
ns.NlHandle().LinkDel(link)
if err := ns.NlHandle().LinkDel(link); err != nil {
logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id)
}
}
if err := d.storeDelete(ep); err != nil {

View file

@ -150,7 +150,9 @@ func (d *driver) DeleteNetwork(nid string) error {
}
for _, ep := range n.endpoints {
if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil {
ns.NlHandle().LinkDel(link)
if err := ns.NlHandle().LinkDel(link); err != nil {
logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id)
}
}
if err := d.storeDelete(ep); err != nil {

View file

@ -81,7 +81,9 @@ func (d *driver) DeleteEndpoint(nid, eid string) error {
return fmt.Errorf("endpoint id %q not found", eid)
}
if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil {
ns.NlHandle().LinkDel(link)
if err := ns.NlHandle().LinkDel(link); err != nil {
logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id)
}
}
if err := d.storeDelete(ep); err != nil {

View file

@ -154,7 +154,9 @@ func (d *driver) DeleteNetwork(nid string) error {
}
for _, ep := range n.endpoints {
if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil {
ns.NlHandle().LinkDel(link)
if err := ns.NlHandle().LinkDel(link); err != nil {
logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id)
}
}
if err := d.storeDelete(ep); err != nil {

View file

@ -242,8 +242,10 @@ func (d *driver) DeleteNetwork(nid string) error {
for _, ep := range n.endpoints {
if ep.ifName != "" {
if link, err := ns.NlHandle().LinkByName(ep.ifName); err != nil {
ns.NlHandle().LinkDel(link)
if link, err := ns.NlHandle().LinkByName(ep.ifName); err == nil {
if err := ns.NlHandle().LinkDel(link); err != nil {
logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.ifName, ep.id)
}
}
}

View file

@ -6,9 +6,11 @@ import (
builtinIpam "github.com/docker/libnetwork/ipams/builtin"
nullIpam "github.com/docker/libnetwork/ipams/null"
remoteIpam "github.com/docker/libnetwork/ipams/remote"
"github.com/docker/libnetwork/ipamutils"
)
func initIPAMDrivers(r *drvregistry.DrvRegistry, lDs, gDs interface{}) error {
func initIPAMDrivers(r *drvregistry.DrvRegistry, lDs, gDs interface{}, addressPool []*ipamutils.NetworkToSplit) error {
builtinIpam.SetDefaultIPAddressPool(addressPool)
for _, fn := range [](func(ipamapi.Callback, interface{}, interface{}) error){
builtinIpam.Init,
remoteIpam.Init,

View file

@ -11,6 +11,11 @@ import (
"github.com/docker/libnetwork/ipamutils"
)
var (
// defaultAddressPool Stores user configured subnet list
defaultAddressPool []*ipamutils.NetworkToSplit
)
// Init registers the built-in ipam service with libnetwork
func Init(ic ipamapi.Callback, l, g interface{}) error {
var (
@ -30,7 +35,7 @@ func Init(ic ipamapi.Callback, l, g interface{}) error {
}
}
ipamutils.InitNetworks()
ipamutils.InitNetworks(GetDefaultIPAddressPool())
a, err := ipam.NewAllocator(localDs, globalDs)
if err != nil {
@ -41,3 +46,13 @@ func Init(ic ipamapi.Callback, l, g interface{}) error {
return ic.RegisterIpamDriverWithCapabilities(ipamapi.DefaultIPAM, a, cps)
}
// SetDefaultIPAddressPool stores the user-configured default address pool.
// Init passes this pool to ipamutils.InitNetworks, so it must be set before
// the built-in IPAM driver is initialized.
func SetDefaultIPAddressPool(addressPool []*ipamutils.NetworkToSplit) {
	defaultAddressPool = addressPool
}

// GetDefaultIPAddressPool returns the stored default address pool
// (nil when none has been configured).
func GetDefaultIPAddressPool() []*ipamutils.NetworkToSplit {
	return defaultAddressPool
}

View file

@ -13,6 +13,11 @@ import (
windowsipam "github.com/docker/libnetwork/ipams/windowsipam"
)
var (
// defaultAddressPool Stores user configured subnet list
defaultAddressPool []*ipamutils.NetworkToSplit
)
// InitDockerDefault registers the built-in ipam service with libnetwork
func InitDockerDefault(ic ipamapi.Callback, l, g interface{}) error {
var (
@ -32,7 +37,7 @@ func InitDockerDefault(ic ipamapi.Callback, l, g interface{}) error {
}
}
ipamutils.InitNetworks()
ipamutils.InitNetworks(nil)
a, err := ipam.NewAllocator(localDs, globalDs)
if err != nil {
@ -55,3 +60,13 @@ func Init(ic ipamapi.Callback, l, g interface{}) error {
return initFunc(ic, l, g)
}
// SetDefaultIPAddressPool stores the user-configured default address pool.
func SetDefaultIPAddressPool(addressPool []*ipamutils.NetworkToSplit) {
	defaultAddressPool = addressPool
}

// GetDefaultIPAddressPool returns the stored default address pool
// (nil when none has been configured).
func GetDefaultIPAddressPool() []*ipamutils.NetworkToSplit {
	return defaultAddressPool
}

View file

@ -2,8 +2,11 @@
package ipamutils
import (
"fmt"
"net"
"sync"
"github.com/sirupsen/logrus"
)
var (
@ -13,38 +16,81 @@ var (
// PredefinedGranularNetworks contains a list of 64K IPv4 private networks with host size 8
// (10.x.x.x/24) which do not overlap with the networks in `PredefinedBroadNetworks`
PredefinedGranularNetworks []*net.IPNet
initNetworksOnce sync.Once
initNetworksOnce sync.Once
defaultBroadNetwork = []*NetworkToSplit{{"172.17.0.0/16", 16}, {"172.18.0.0/16", 16}, {"172.19.0.0/16", 16},
{"172.20.0.0/14", 16}, {"172.24.0.0/14", 16}, {"172.28.0.0/14", 16},
{"192.168.0.0/16", 20}}
defaultGranularNetwork = []*NetworkToSplit{{"10.0.0.0/8", 24}}
)
// InitNetworks initializes the pre-defined networks used by the built-in IP allocator
func InitNetworks() {
// NetworkToSplit represents a network that has to be split in chunks with mask length Size.
// Each subnet in the set is derived from the Base pool. Base is to be passed
// in CIDR format.
// Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256
// 10.10.[0-255].0/24 address pools.
type NetworkToSplit struct {
	Base string `json:"base"` // base pool in CIDR notation, e.g. "10.10.0.0/16"
	Size int    `json:"size"` // mask length of each chunk carved out of Base
}
// InitNetworks initializes the broad network pool and the granular network
// pool. It is guarded by a sync.Once, so only the first call has any effect.
// When defaultAddressPool is nil the built-in defaultBroadNetwork list is
// used for the broad pool.
//
// NOTE(review): the two stray assignments calling the deleted
// initBroadPredefinedNetworks/initGranularPredefinedNetworks helpers were
// leftover pre-change lines and have been removed — they no longer compile.
func InitNetworks(defaultAddressPool []*NetworkToSplit) {
	initNetworksOnce.Do(func() {
		// error ignored: defaultGranularNetwork is hard-coded and should never fail to split
		PredefinedGranularNetworks, _ = splitNetworks(defaultGranularNetwork)
		if defaultAddressPool == nil {
			defaultAddressPool = defaultBroadNetwork
		}
		var err error
		if PredefinedBroadNetworks, err = splitNetworks(defaultAddressPool); err != nil {
			logrus.WithError(err).Error("InitAddressPools failed to initialize the default address pool")
		}
	})
}
func initBroadPredefinedNetworks() []*net.IPNet {
pl := make([]*net.IPNet, 0, 31)
mask := []byte{255, 255, 0, 0}
for i := 17; i < 32; i++ {
pl = append(pl, &net.IPNet{IP: []byte{172, byte(i), 0, 0}, Mask: mask})
// splitNetworks validates each NetworkToSplit in list, splits its Base pool
// into subnets of the requested Size, and returns the concatenated result.
// It returns an error when a Base is not valid CIDR, or when a Size is
// non-positive or smaller than the Base's own mask length.
//
// NOTE(review): the interleaved remnants of the deleted
// initBroadPredefinedNetworks body (mask20 loop, "return pl") were diff
// residue inside this function and have been removed.
func splitNetworks(list []*NetworkToSplit) ([]*net.IPNet, error) {
	localPools := make([]*net.IPNet, 0, len(list))
	for _, p := range list {
		_, b, err := net.ParseCIDR(p.Base)
		if err != nil {
			return nil, fmt.Errorf("invalid base pool %q: %v", p.Base, err)
		}
		ones, _ := b.Mask.Size()
		if p.Size <= 0 || p.Size < ones {
			return nil, fmt.Errorf("invalid pools size: %d", p.Size)
		}
		localPools = append(localPools, splitNetwork(p.Size, b)...)
	}
	return localPools, nil
}
func initGranularPredefinedNetworks() []*net.IPNet {
pl := make([]*net.IPNet, 0, 256*256)
mask := []byte{255, 255, 255, 0}
for i := 0; i < 256; i++ {
for j := 0; j < 256; j++ {
pl = append(pl, &net.IPNet{IP: []byte{10, byte(i), byte(j), 0}, Mask: mask})
}
func splitNetwork(size int, base *net.IPNet) []*net.IPNet {
one, bits := base.Mask.Size()
mask := net.CIDRMask(size, bits)
n := 1 << uint(size-one)
s := uint(bits - size)
list := make([]*net.IPNet, 0, n)
for i := 0; i < n; i++ {
ip := copyIP(base.IP)
addIntToIP(ip, uint(i<<s))
list = append(list, &net.IPNet{IP: ip, Mask: mask})
}
return list
}
func copyIP(from net.IP) net.IP {
ip := make([]byte, len(from))
copy(ip, from)
return ip
}
func addIntToIP(array net.IP, ordinal uint) {
for i := len(array) - 1; i >= 0; i-- {
array[i] |= (byte)(ordinal & 0xff)
ordinal >>= 8
}
return pl
}

View file

@ -959,7 +959,7 @@ func (n *network) delete(force bool) error {
if len(n.loadBalancerIP) != 0 {
endpoints := n.Endpoints()
if force || len(endpoints) == 1 {
if force || (len(endpoints) == 1 && !n.ingress) {
n.deleteLoadBalancerSandbox()
}
//Reload the network from the store to update the epcnt.

View file

@ -9,6 +9,8 @@ import (
"github.com/sirupsen/logrus"
)
// maxSetStringLen caps how much of the IP-to-endpoint set dump is included
// in the transient-state warning logs, keeping log lines bounded.
const maxSetStringLen = 350
func (c *controller) addEndpointNameResolution(svcName, svcID, nID, eID, containerName string, vip net.IP, serviceAliases, taskAliases []string, ip net.IP, addService bool, method string) error {
n, err := c.NetworkByID(nID)
if err != nil {
@ -285,7 +287,10 @@ func (c *controller) addServiceBinding(svcName, svcID, nID, eID, containerName s
ok, entries := s.assignIPToEndpoint(ip.String(), eID)
if !ok || entries > 1 {
setStr, b := s.printIPToEndpoint(ip.String())
logrus.Warnf("addServiceBinding %s possible trainsient state ok:%t entries:%d set:%t %s", eID, ok, entries, b, setStr)
if len(setStr) > maxSetStringLen {
setStr = setStr[:maxSetStringLen]
}
logrus.Warnf("addServiceBinding %s possible transient state ok:%t entries:%d set:%t %s", eID, ok, entries, b, setStr)
}
// Add loadbalancer service and backend in all sandboxes in
@ -353,7 +358,10 @@ func (c *controller) rmServiceBinding(svcName, svcID, nID, eID, containerName st
ok, entries := s.removeIPToEndpoint(ip.String(), eID)
if !ok || entries > 0 {
setStr, b := s.printIPToEndpoint(ip.String())
logrus.Warnf("rmServiceBinding %s possible trainsient state ok:%t entries:%d set:%t %s", eID, ok, entries, b, setStr)
if len(setStr) > maxSetStringLen {
setStr = setStr[:maxSetStringLen]
}
logrus.Warnf("rmServiceBinding %s possible transient state ok:%t entries:%d set:%t %s", eID, ok, entries, b, setStr)
}
// Remove loadbalancer service(if needed) and backend in all

View file

@ -1,5 +1,5 @@
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
github.com/Microsoft/go-winio v0.4.5
github.com/Microsoft/hcsshim v0.6.5
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec