
Adding Advertise-addr support

With this change, all auto-detection of addresses is removed from
libnetwork and the caller takes the responsibility to supply a proper
advertise-addr for the various scenarios (including an externally facing
public advertise-addr combined with an internally facing private listen-addr).

Signed-off-by: Madhu Venugopal <madhu@docker.com>
Madhu Venugopal 2016-07-19 18:17:30 -07:00
parent cc69d2452c
commit 6368406c26
10 changed files with 68 additions and 62 deletions
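
Note: the best-effort address detection deleted from agentSetup (see the first diff below) is now the caller's job. A minimal caller-side sketch of that heuristic, mirroring the removed logic; the bestEffortAdvertiseAddr helper name is hypothetical and not part of libnetwork's API:

package main

import (
	"fmt"
	"net"
)

// bestEffortAdvertiseAddr mirrors the heuristic removed from agentSetup in
// this commit: "dial" a UDP socket towards the remote (or a well-known
// address) and use the local side of that socket as the advertise address.
func bestEffortAdvertiseAddr(remote string) (string, error) {
	if remote == "" {
		remote = "8.8.8.8:53"
	}
	// A UDP dial sends no packets; it only resolves which local address
	// the kernel would route from.
	conn, err := net.Dial("udp", remote)
	if err != nil {
		return "", err
	}
	defer conn.Close()
	addr, _, err := net.SplitHostPort(conn.LocalAddr().String())
	return addr, err
}

func main() {
	adv, err := bestEffortAdvertiseAddr("")
	if err != nil {
		fmt.Println("detection failed:", err)
		return
	}
	// The caller then hands adv (plus its own listen/local address) to
	// libnetwork through the updated cluster.Provider accessors.
	fmt.Println("advertise-addr:", adv)
}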

View file

@@ -35,6 +35,7 @@ func (b ByTime) Less(i, j int) bool { return b[i].LamportTime < b[j].LamportTime
 type agent struct {
 	networkDB         *networkdb.NetworkDB
 	bindAddr          string
+	advertiseAddr     string
 	epTblCancel       func()
 	driverCancelFuncs map[string][]func()
 }
@@ -236,25 +237,14 @@ func (c *controller) handleKeyChangeV1(keys []*types.EncryptionKey) error {
 func (c *controller) agentSetup() error {
 	clusterProvider := c.cfg.Daemon.ClusterProvider
-	bindAddr, _, _ := net.SplitHostPort(clusterProvider.GetListenAddress())
+	bindAddr := clusterProvider.GetLocalAddress()
+	advAddr := clusterProvider.GetAdvertiseAddress()
 	remote := clusterProvider.GetRemoteAddress()
 	remoteAddr, _, _ := net.SplitHostPort(remote)
-	// Determine the BindAddress from RemoteAddress or through best-effort routing
-	if !isValidClusteringIP(bindAddr) {
-		if !isValidClusteringIP(remoteAddr) {
-			remote = "8.8.8.8:53"
-		}
-		conn, err := net.Dial("udp", remote)
-		if err == nil {
-			bindHostPort := conn.LocalAddr().String()
-			bindAddr, _, _ = net.SplitHostPort(bindHostPort)
-			conn.Close()
-		}
-	}
-	if bindAddr != "" && c.agent == nil {
-		if err := c.agentInit(bindAddr); err != nil {
+	logrus.Infof("Initializing Libnetwork Agent Local-addr=%s Adv-addr=%s Remote-addr =%s", bindAddr, advAddr, remoteAddr)
+	if advAddr != "" && c.agent == nil {
+		if err := c.agentInit(bindAddr, advAddr); err != nil {
 			logrus.Errorf("Error in agentInit : %v", err)
 		} else {
 			c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool {
@@ -312,7 +302,7 @@ func (c *controller) getPrimaryKeyTag(subsys string) ([]byte, uint64) {
 	return keys[1].Key, keys[1].LamportTime
 }
 
-func (c *controller) agentInit(bindAddrOrInterface string) error {
+func (c *controller) agentInit(bindAddrOrInterface, advertiseAddr string) error {
 	if !c.isAgent() {
 		return nil
 	}
@@ -325,9 +315,9 @@ func (c *controller) agentInit(bindAddrOrInterface string) error {
 	keys, tags := c.getKeys(subsysGossip)
 	hostname, _ := os.Hostname()
 	nDB, err := networkdb.New(&networkdb.Config{
-		BindAddr: bindAddr,
+		AdvertiseAddr: advertiseAddr,
 		NodeName: hostname,
 		Keys:     keys,
 	})
 	if err != nil {
@@ -339,6 +329,7 @@ func (c *controller) agentInit(bindAddrOrInterface string) error {
 	c.agent = &agent{
 		networkDB:         nDB,
 		bindAddr:          bindAddr,
+		advertiseAddr:     advertiseAddr,
 		epTblCancel:       cancel,
 		driverCancelFuncs: make(map[string][]func()),
 	}
@@ -377,8 +368,9 @@ func (c *controller) agentDriverNotify(d driverapi.Driver) {
 	}
 
 	d.DiscoverNew(discoverapi.NodeDiscovery, discoverapi.NodeDiscoveryData{
-		Address: c.agent.bindAddr,
-		Self:    true,
+		Address:     c.agent.advertiseAddr,
+		BindAddress: c.agent.bindAddr,
+		Self:        true,
 	})
 
 	drvEnc := discoverapi.DriverEncryptionConfig{}

View file

@@ -4,7 +4,8 @@ package cluster
 type Provider interface {
 	IsManager() bool
 	IsAgent() bool
-	GetListenAddress() string
+	GetLocalAddress() string
+	GetAdvertiseAddress() string
 	GetRemoteAddress() string
 	ListenClusterEvents() <-chan struct{}
 }
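
For reference, a minimal sketch of a caller-side type satisfying the updated Provider interface above. The staticProvider name, its fields, and the example addresses are hypothetical; the import path assumes the package lives at github.com/docker/libnetwork/cluster:

package main

import (
	"fmt"

	"github.com/docker/libnetwork/cluster"
)

// staticProvider hands back pre-computed addresses instead of relying on
// libnetwork to auto-detect them, which is the point of this commit.
type staticProvider struct {
	local, advertise, remote string
	events                   chan struct{}
}

func (p *staticProvider) IsManager() bool                      { return false }
func (p *staticProvider) IsAgent() bool                        { return true }
func (p *staticProvider) GetLocalAddress() string              { return p.local }
func (p *staticProvider) GetAdvertiseAddress() string          { return p.advertise }
func (p *staticProvider) GetRemoteAddress() string             { return p.remote }
func (p *staticProvider) ListenClusterEvents() <-chan struct{} { return p.events }

// Compile-time check that staticProvider implements cluster.Provider.
var _ cluster.Provider = (*staticProvider)(nil)

func main() {
	p := &staticProvider{
		local:     "10.0.0.2",
		advertise: "203.0.113.10",
		remote:    "10.0.0.1:2377",
		events:    make(chan struct{}),
	}
	fmt.Println("advertising", p.GetAdvertiseAddress(), "listening on", p.GetLocalAddress())
}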

View file

@@ -306,7 +306,11 @@ func (d *dnetConnection) IsAgent() bool {
 	return d.Orchestration.Agent
 }
 
-func (d *dnetConnection) GetListenAddress() string {
+func (d *dnetConnection) GetAdvertiseAddress() string {
+	return d.Orchestration.Bind
+}
+
+func (d *dnetConnection) GetLocalAddress() string {
 	return d.Orchestration.Bind
 }

View file

@@ -26,8 +26,9 @@ const (
 // NodeDiscoveryData represents the structure backing the node discovery data json string
 type NodeDiscoveryData struct {
 	Address     string
-	Self        bool
+	BindAddress string
+	Self        bool
 }
 
 // DatastoreConfigData is the data for the datastore update event message

View file

@@ -8,12 +8,13 @@ import (
 	"sync"
 	"syscall"
 
+	"strconv"
+
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/libnetwork/iptables"
 	"github.com/docker/libnetwork/ns"
 	"github.com/docker/libnetwork/types"
 	"github.com/vishvananda/netlink"
-	"strconv"
 )
 
 const (
@@ -85,6 +86,7 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal
 	}
 
 	lIP := types.GetMinimalIP(net.ParseIP(d.bindAddress))
+	aIP := types.GetMinimalIP(net.ParseIP(d.advertiseAddress))
 	nodes := map[string]net.IP{}
 
 	switch {
@@ -107,7 +109,7 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal
 	if add {
 		for _, rIP := range nodes {
-			if err := setupEncryption(lIP, rIP, vxlanID, d.secMap, d.keys); err != nil {
+			if err := setupEncryption(lIP, aIP, rIP, vxlanID, d.secMap, d.keys); err != nil {
 				log.Warnf("Failed to program network encryption between %s and %s: %v", lIP, rIP, err)
 			}
 		}
@@ -122,7 +124,7 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal
 	return nil
 }
 
-func setupEncryption(localIP, remoteIP net.IP, vni uint32, em *encrMap, keys []*key) error {
+func setupEncryption(localIP, advIP, remoteIP net.IP, vni uint32, em *encrMap, keys []*key) error {
 	log.Debugf("Programming encryption for vxlan %d between %s and %s", vni, localIP, remoteIP)
 	rIPs := remoteIP.String()
@@ -134,7 +136,7 @@ func setupEncryption(localIP, remoteIP net.IP, vni uint32, em *encrMap, keys []*
 	}
 
 	for i, k := range keys {
-		spis := &spi{buildSPI(localIP, remoteIP, k.tag), buildSPI(remoteIP, localIP, k.tag)}
+		spis := &spi{buildSPI(advIP, remoteIP, k.tag), buildSPI(remoteIP, advIP, k.tag)}
 		dir := reverse
 		if i == 0 {
 			dir = bidir

View file

@@ -119,7 +119,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
 	}
 
 	d.peerDbAdd(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac,
-		net.ParseIP(d.bindAddress), true)
+		net.ParseIP(d.advertiseAddress), true)
 
 	if err := d.checkEncryption(nid, nil, n.vxlanID(s), true, true); err != nil {
 		log.Warn(err)
@@ -128,7 +128,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
 	buf, err := proto.Marshal(&PeerRecord{
 		EndpointIP:       ep.addr.String(),
 		EndpointMAC:      ep.mac.String(),
-		TunnelEndpointIP: d.bindAddress,
+		TunnelEndpointIP: d.advertiseAddress,
 	})
 	if err != nil {
 		return err
@@ -159,7 +159,7 @@ func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key stri
 	// Ignore local peers. We already know about them and they
 	// should not be added to vxlan fdb.
-	if peer.TunnelEndpointIP == d.bindAddress {
+	if peer.TunnelEndpointIP == d.advertiseAddress {
 		return
 	}

View file

@@ -40,7 +40,7 @@ func (d *driver) serfInit() error {
 	config := serf.DefaultConfig()
 	config.Init()
-	config.MemberlistConfig.BindAddr = d.bindAddress
+	config.MemberlistConfig.BindAddr = d.advertiseAddress
 
 	d.eventCh = make(chan serf.Event, 4)
 	config.EventCh = d.eventCh

View file

@@ -31,22 +31,23 @@ const (
 var initVxlanIdm = make(chan (bool), 1)
 
 type driver struct {
-	eventCh      chan serf.Event
-	notifyCh     chan ovNotify
-	exitCh       chan chan struct{}
-	bindAddress  string
-	neighIP      string
-	config       map[string]interface{}
-	peerDb       peerNetworkMap
-	secMap       *encrMap
-	serfInstance *serf.Serf
-	networks     networkTable
-	store        datastore.DataStore
-	localStore   datastore.DataStore
-	vxlanIdm     *idm.Idm
-	once         sync.Once
-	joinOnce     sync.Once
-	keys         []*key
+	eventCh          chan serf.Event
+	notifyCh         chan ovNotify
+	exitCh           chan chan struct{}
+	bindAddress      string
+	advertiseAddress string
+	neighIP          string
+	config           map[string]interface{}
+	peerDb           peerNetworkMap
+	secMap           *encrMap
+	serfInstance     *serf.Serf
+	networks         networkTable
+	store            datastore.DataStore
+	localStore       datastore.DataStore
+	vxlanIdm         *idm.Idm
+	once             sync.Once
+	joinOnce         sync.Once
+	keys             []*key
 	sync.Mutex
 }
@@ -144,7 +145,7 @@ func (d *driver) restoreEndpoints() error {
 		}
 		n.incEndpointCount()
-		d.peerDbAdd(ep.nid, ep.id, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.bindAddress), true)
+		d.peerDbAdd(ep.nid, ep.id, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), true)
 	}
 	return nil
 }
@@ -215,20 +216,25 @@ func validateSelf(node string) error {
 	return fmt.Errorf("Multi-Host overlay networking requires cluster-advertise(%s) to be configured with a local ip-address that is reachable within the cluster", advIP.String())
 }
 
-func (d *driver) nodeJoin(node string, self bool) {
+func (d *driver) nodeJoin(advertiseAddress, bindAddress string, self bool) {
 	if self && !d.isSerfAlive() {
-		if err := validateSelf(node); err != nil {
-			logrus.Errorf("%s", err.Error())
-		}
 		d.Lock()
-		d.bindAddress = node
+		d.advertiseAddress = advertiseAddress
+		d.bindAddress = bindAddress
 		d.Unlock()
 
 		// If there is no cluster store there is no need to start serf.
		if d.store != nil {
+			if err := validateSelf(advertiseAddress); err != nil {
+				logrus.Warnf("%s", err.Error())
+			}
 			err := d.serfInit()
 			if err != nil {
 				logrus.Errorf("initializing serf instance failed: %v", err)
+				d.Lock()
+				d.advertiseAddress = ""
+				d.bindAddress = ""
+				d.Unlock()
 				return
 			}
 		}
@@ -236,7 +242,7 @@ func (d *driver) nodeJoin(node string, self bool) {
 	d.Lock()
 	if !self {
-		d.neighIP = node
+		d.neighIP = advertiseAddress
 	}
 	neighIP := d.neighIP
 	d.Unlock()
@@ -250,7 +256,7 @@ func (d *driver) nodeJoin(node string, self bool) {
 			}
 		})
 		if err != nil {
-			logrus.Errorf("joining serf neighbor %s failed: %v", node, err)
+			logrus.Errorf("joining serf neighbor %s failed: %v", advertiseAddress, err)
 			d.Lock()
 			d.joinOnce = sync.Once{}
 			d.Unlock()
@@ -290,7 +296,7 @@ func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{})
 		if !ok || nodeData.Address == "" {
 			return fmt.Errorf("invalid discovery data")
 		}
-		d.nodeJoin(nodeData.Address, nodeData.Self)
+		d.nodeJoin(nodeData.Address, nodeData.BindAddress, nodeData.Self)
 	case discoverapi.DatastoreConfig:
 		if d.store != nil {
 			return types.ForbiddenErrorf("cannot accept datastore configuration: Overlay driver has a datastore configured already")

View file

@@ -81,7 +81,7 @@ func (nDB *NetworkDB) RemoveKey(key []byte) {
 func (nDB *NetworkDB) clusterInit() error {
 	config := memberlist.DefaultLANConfig()
 	config.Name = nDB.config.NodeName
-	config.BindAddr = nDB.config.BindAddr
+	config.AdvertiseAddr = nDB.config.AdvertiseAddr
 	if nDB.config.BindPort != 0 {
 		config.BindPort = nDB.config.BindPort

View file

@@ -107,9 +107,9 @@ type Config struct {
 	// NodeName is the cluster wide unique name for this node.
 	NodeName string
 
-	// BindAddr is the local node's IP address that we bind to for
+	// AdvertiseAddr is the node's IP address that we advertise for
 	// cluster communication.
-	BindAddr string
+	AdvertiseAddr string
 
 	// BindPort is the local node's port to which we bind to for
 	// cluster communication.