package networkdb

import (
	"bytes"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"log"
	"math/big"
	rnd "math/rand"
	"net"
	"strings"
	"time"

	"github.com/hashicorp/memberlist"
	"github.com/sirupsen/logrus"
)

const (
	// The garbage collection logic for entries leverages the presence of the network.
	// For this reason the expiration time of the network is set slightly higher than the entry expiration, so that
	// there are at least 5 extra cycles to make sure that all the entries are properly deleted before deleting the network.
	reapEntryInterval   = 30 * time.Minute
	reapNetworkInterval = reapEntryInterval + 5*reapPeriod
	reapPeriod          = 5 * time.Second
	retryInterval       = 1 * time.Second
	nodeReapInterval    = 24 * time.Hour
	nodeReapPeriod      = 2 * time.Hour
)
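
// logWriter redirects memberlist's standard-library log output to logrus,
// mapping the bracketed level prefix of each line ([WARN], [DEBUG], [INFO],
// [ERR]) to a logrus level.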
type logWriter struct{}

func (l *logWriter) Write(p []byte) (int, error) {
	str := string(p)
	str = strings.TrimSuffix(str, "\n")

	switch {
	case strings.HasPrefix(str, "[WARN] "):
		str = strings.TrimPrefix(str, "[WARN] ")
		logrus.Warn(str)
	case strings.HasPrefix(str, "[DEBUG] "):
		str = strings.TrimPrefix(str, "[DEBUG] ")
		logrus.Debug(str)
	case strings.HasPrefix(str, "[INFO] "):
		str = strings.TrimPrefix(str, "[INFO] ")
		logrus.Info(str)
	case strings.HasPrefix(str, "[ERR] "):
		str = strings.TrimPrefix(str, "[ERR] ")
		logrus.Warn(str)
	}

	return len(p), nil
}
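
// A typical key rotation using the three methods below might look like the
// following sketch (illustrative only; newKey and oldKey are placeholders):
//
//	nDB.SetKey(newKey)        // make the new key available on the ring
//	nDB.SetPrimaryKey(newKey) // start encrypting with the new key
//	nDB.RemoveKey(oldKey)     // drop the old key once rotation is done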

// SetKey adds a new key to the key ring
func (nDB *NetworkDB) SetKey(key []byte) {
	logrus.Debugf("Adding key %s", hex.EncodeToString(key)[0:5])
	nDB.Lock()
	defer nDB.Unlock()
	for _, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			return
		}
	}
	nDB.config.Keys = append(nDB.config.Keys, key)
	if nDB.keyring != nil {
		nDB.keyring.AddKey(key)
	}
}

// SetPrimaryKey sets the given key as the primary key. The key must
// already have been added a priori through SetKey.
func (nDB *NetworkDB) SetPrimaryKey(key []byte) {
	logrus.Debugf("Primary Key %s", hex.EncodeToString(key)[0:5])
	nDB.RLock()
	defer nDB.RUnlock()
	for _, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			if nDB.keyring != nil {
				nDB.keyring.UseKey(dbKey)
			}
			break
		}
	}
}

// RemoveKey removes a key from the key ring. The key being removed
// must not be the primary key.
func (nDB *NetworkDB) RemoveKey(key []byte) {
	logrus.Debugf("Remove Key %s", hex.EncodeToString(key)[0:5])
	nDB.Lock()
	defer nDB.Unlock()
	for i, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			nDB.config.Keys = append(nDB.config.Keys[:i], nDB.config.Keys[i+1:]...)
			if nDB.keyring != nil {
				nDB.keyring.RemoveKey(dbKey)
			}
			break
		}
	}
}
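
// clusterInit creates the memberlist instance with the NetworkDB delegates,
// logger, keyring and broadcast queues, and starts the periodic background
// tasks: state reaping, gossip, bulk table sync, failed-node reconnection
// and dead-node reaping.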
func (nDB *NetworkDB) clusterInit() error {
	nDB.lastStatsTimestamp = time.Now()
	nDB.lastHealthTimestamp = nDB.lastStatsTimestamp

	config := memberlist.DefaultLANConfig()
	config.Name = nDB.config.NodeID
	config.BindAddr = nDB.config.BindAddr
	config.AdvertiseAddr = nDB.config.AdvertiseAddr
	config.UDPBufferSize = nDB.config.PacketBufferSize

	if nDB.config.BindPort != 0 {
		config.BindPort = nDB.config.BindPort
	}

	config.ProtocolVersion = memberlist.ProtocolVersion2Compatible
	config.Delegate = &delegate{nDB: nDB}
	config.Events = &eventDelegate{nDB: nDB}
	// Use a custom logger that does not add a time or date prefix, so that
	// they are not duplicated by logrus.
	config.Logger = log.New(&logWriter{}, "", 0)

	var err error
	if len(nDB.config.Keys) > 0 {
		for i, key := range nDB.config.Keys {
			logrus.Debugf("Encryption key %d: %s", i+1, hex.EncodeToString(key)[0:5])
		}
		nDB.keyring, err = memberlist.NewKeyring(nDB.config.Keys, nDB.config.Keys[0])
		if err != nil {
			return err
		}
		config.Keyring = nDB.keyring
	}

	nDB.networkBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	nDB.nodeBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	mlist, err := memberlist.Create(config)
	if err != nil {
		return fmt.Errorf("failed to create memberlist: %v", err)
	}

	nDB.stopCh = make(chan struct{})
	nDB.memberlist = mlist

	for _, trigger := range []struct {
		interval time.Duration
		fn       func()
	}{
		{reapPeriod, nDB.reapState},
		{config.GossipInterval, nDB.gossip},
		{config.PushPullInterval, nDB.bulkSyncTables},
		{retryInterval, nDB.reconnectNode},
		{nodeReapPeriod, nDB.reapDeadNode},
	} {
		t := time.NewTicker(trigger.interval)
		go nDB.triggerFunc(trigger.interval, t.C, nDB.stopCh, trigger.fn)
		nDB.tickers = append(nDB.tickers, t)
	}

	return nil
}
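
// retryJoin retries joining the given cluster members every retryInterval
// until both the memberlist join and the node join event succeed, or until
// the stop channel is closed.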
func (nDB *NetworkDB) retryJoin(members []string, stop <-chan struct{}) {
	t := time.NewTicker(retryInterval)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			if _, err := nDB.memberlist.Join(members); err != nil {
				logrus.Errorf("Failed to join memberlist %s on retry: %v", members, err)
				continue
			}
			if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
				logrus.Errorf("failed to send node join on retry: %v", err)
				continue
			}
			return
		case <-stop:
			return
		}
	}
}
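
// clusterJoin joins the memberlist cluster formed by the given members,
// falling back to background retries (retryJoin) if the initial attempt
// fails. A hypothetical call, assuming a reachable peer address in the
// "host:port" form that memberlist accepts:
//
//	err := nDB.clusterJoin([]string{"192.0.2.10:7946"})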
func (nDB *NetworkDB) clusterJoin(members []string) error {
	mlist := nDB.memberlist

	if _, err := mlist.Join(members); err != nil {
		// In case of failure, keep retrying the join until it succeeds or
		// the cluster is shut down.
		go nDB.retryJoin(members, nDB.stopCh)
		return fmt.Errorf("could not join node to memberlist: %v", err)
	}

	if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
		return fmt.Errorf("failed to send node join: %v", err)
	}

	return nil
}
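
// clusterLeave broadcasts a leave event for this node, leaves the
// memberlist cluster, stops all periodic tickers and shuts down the
// memberlist transport.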
func (nDB *NetworkDB) clusterLeave() error {
	mlist := nDB.memberlist

	if err := nDB.sendNodeEvent(NodeEventTypeLeave); err != nil {
		logrus.Errorf("failed to send node leave: %v", err)
	}

	if err := mlist.Leave(time.Second); err != nil {
		return err
	}

	close(nDB.stopCh)

	for _, t := range nDB.tickers {
		t.Stop()
	}

	return mlist.Shutdown()
}
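
// triggerFunc waits a random delay of up to stagger, then invokes f on
// every tick of C until stop is closed. clusterInit wires each periodic
// task through it, along these lines:
//
//	t := time.NewTicker(interval)
//	go nDB.triggerFunc(interval, t.C, nDB.stopCh, nDB.gossip)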
func (nDB *NetworkDB) triggerFunc(stagger time.Duration, C <-chan time.Time, stop <-chan struct{}, f func()) {
	// Use a random stagger to avoid synchronizing
	randStagger := time.Duration(uint64(rnd.Int63()) % uint64(stagger))
	select {
	case <-time.After(randStagger):
	case <-stop:
		return
	}
	for {
		select {
		case <-C:
			f()
		case <-stop:
			return
		}
	}
}
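
// reapDeadNode counts down the reap timer of each failed node and removes
// the node from the failed list once the timer expires.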
func (nDB *NetworkDB) reapDeadNode() {
	nDB.Lock()
	defer nDB.Unlock()
	for id, n := range nDB.failedNodes {
		if n.reapTime > 0 {
			n.reapTime -= nodeReapPeriod
			continue
		}
		logrus.Debugf("Removing failed node %v from gossip cluster", n.Name)
		delete(nDB.failedNodes, id)
	}
}
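
// reconnectNode picks one random node from the failed list and tries to
// rejoin it; on success it re-announces this node and bulk syncs with the
// recovered peer.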
func (nDB *NetworkDB) reconnectNode() {
	nDB.RLock()
	if len(nDB.failedNodes) == 0 {
		nDB.RUnlock()
		return
	}

	nodes := make([]*node, 0, len(nDB.failedNodes))
	for _, n := range nDB.failedNodes {
		nodes = append(nodes, n)
	}
	nDB.RUnlock()

	node := nodes[randomOffset(len(nodes))]
	addr := net.UDPAddr{IP: node.Addr, Port: int(node.Port)}

	if _, err := nDB.memberlist.Join([]string{addr.String()}); err != nil {
		return
	}

	if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
		return
	}

	logrus.Debugf("Initiating bulk sync with node %s after reconnect", node.Name)
	nDB.bulkSync([]string{node.Name}, true)
}

// For timing the entry deletion in the reaper APIs, a monotonic clock
// source (time.Now, Sub etc.) should be avoided. Hence we keep a reapTime
// in every entry, set initially to reapEntryInterval and decremented by
// reapPeriod every time the reaper runs. NOTE: nDB.reapTableEntries
// updates the reapTime under a read lock. This is safe as long as no other
// concurrent path touches the reapTime field.
func (nDB *NetworkDB) reapState() {
	// reapTableEntries leverages the presence of the network, so garbage
	// collect the entries first.
	nDB.reapTableEntries()
	nDB.reapNetworks()
}
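
// reapNetworks walks all the per-node network entries and deletes the ones
// that have been in the leaving state long enough for their reap timer to
// expire.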
func (nDB *NetworkDB) reapNetworks() {
	nDB.Lock()
	for _, nn := range nDB.networks {
		for id, n := range nn {
			if n.leaving {
				if n.reapTime <= 0 {
					delete(nn, id)
					continue
				}
				n.reapTime -= reapPeriod
			}
		}
	}
	nDB.Unlock()
}
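
// reapTableEntries garbage collects deleted table entries, one network at a
// time. Entries are indexed in the radix tree by a path of the form
// "/networkID/tableName/key", so walking the "/networkID" prefix visits
// every entry of that network; for example the path "/n1/t1/k1" splits into
// network "n1", table "t1" and key "k1".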
func (nDB *NetworkDB) reapTableEntries() {
	var nodeNetworks []string
	// This is best effort; any change to the list of networks will be
	// picked up in the next cycle.
	nDB.RLock()
	for nid := range nDB.networks[nDB.config.NodeID] {
		nodeNetworks = append(nodeNetworks, nid)
	}
	nDB.RUnlock()

	cycleStart := time.Now()
	// In order to avoid blocking the database for a long time, apply the
	// garbage collection logic by network. The lock is taken at the
	// beginning of the cycle and the deletion is inline.
	for _, nid := range nodeNetworks {
		nDB.Lock()
		nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool {
			// timeCompensation compensates in case the lock took some time
			// to be released.
			timeCompensation := time.Since(cycleStart)
			entry, ok := v.(*entry)
			if !ok || !entry.deleting {
				return false
			}

			// In this check we add an extra second to guarantee that when
			// the number is truncated to int32 to fit the tableEvent
			// packet, it is always strictly > 1 and never 0.
			if entry.reapTime > reapPeriod+timeCompensation+time.Second {
				entry.reapTime -= reapPeriod + timeCompensation
				return false
			}

			params := strings.Split(path[1:], "/")
			nid := params[0]
			tname := params[1]
			key := params[2]

			okTable, okNetwork := nDB.deleteEntry(nid, tname, key)
			if !okTable {
				logrus.Errorf("Table tree delete failed, entry with key:%s does not exist in the table:%s network:%s", key, tname, nid)
			}
			if !okNetwork {
				logrus.Errorf("Network tree delete failed, entry with key:%s does not exist in the network:%s table:%s", key, nid, tname)
			}

			return false
		})
		nDB.Unlock()
	}
}
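
// gossip sends the pending table-event broadcasts of every network this
// node is attached to, as one compound message to up to 3 random peers per
// network. It also prints periodic queue statistics and warns when the
// memberlist health score signals connectivity issues.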
func (nDB *NetworkDB) gossip() {
	networkNodes := make(map[string][]string)
	nDB.RLock()
	thisNodeNetworks := nDB.networks[nDB.config.NodeID]
	for nid := range thisNodeNetworks {
		networkNodes[nid] = nDB.networkNodes[nid]
	}
	printStats := time.Since(nDB.lastStatsTimestamp) >= nDB.config.StatsPrintPeriod
	printHealth := time.Since(nDB.lastHealthTimestamp) >= nDB.config.HealthPrintPeriod
	nDB.RUnlock()

	if printHealth {
		healthScore := nDB.memberlist.GetHealthScore()
		if healthScore != 0 {
			logrus.Warnf("NetworkDB stats %v(%v) - healthscore:%d (connectivity issues)", nDB.config.Hostname, nDB.config.NodeID, healthScore)
		}
		nDB.lastHealthTimestamp = time.Now()
	}

	for nid, nodes := range networkNodes {
		mNodes := nDB.mRandomNodes(3, nodes)
		bytesAvail := nDB.config.PacketBufferSize - compoundHeaderOverhead

		nDB.RLock()
		network, ok := thisNodeNetworks[nid]
		nDB.RUnlock()
		if !ok || network == nil {
			// It is normal for the network to be removed
			// between the time we collect the network
			// attachments of this node and processing
			// them here.
			continue
		}

		broadcastQ := network.tableBroadcasts

		if broadcastQ == nil {
			logrus.Errorf("Invalid broadcastQ encountered while gossiping for network %s", nid)
			continue
		}

		msgs := broadcastQ.GetBroadcasts(compoundOverhead, bytesAvail)
		// Collect stats and print the queue info; note this code is here
		// also to have a view of the queues when they are empty.
		network.qMessagesSent += len(msgs)
		if printStats {
			logrus.Infof("NetworkDB stats %v(%v) - netID:%s leaving:%t netPeers:%d entries:%d Queue qLen:%d netMsg/s:%d",
				nDB.config.Hostname, nDB.config.NodeID,
				nid, network.leaving, broadcastQ.NumNodes(), network.entriesNumber, broadcastQ.NumQueued(),
				network.qMessagesSent/int((nDB.config.StatsPrintPeriod/time.Second)))
			network.qMessagesSent = 0
		}

		if len(msgs) == 0 {
			continue
		}

		// Create a compound message
		compound := makeCompoundMessage(msgs)

		for _, node := range mNodes {
			nDB.RLock()
			mnode := nDB.nodes[node]
			nDB.RUnlock()

			if mnode == nil {
				break
			}

			// Send the compound message
			if err := nDB.memberlist.SendBestEffort(&mnode.Node, compound); err != nil {
				logrus.Errorf("Failed to send gossip to %s: %s", mnode.Addr, err)
			}
		}
	}
	// Reset the stats
	if printStats {
		nDB.lastStatsTimestamp = time.Now()
	}
}
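
// bulkSyncTables runs the periodic bulk sync over every network this node
// participates in (skipping networks it is leaving), and drops from the
// work list any network already covered by a completed sync.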
func (nDB *NetworkDB) bulkSyncTables() {
	var networks []string
	nDB.RLock()
	for nid, network := range nDB.networks[nDB.config.NodeID] {
		if network.leaving {
			continue
		}
		networks = append(networks, nid)
	}
	nDB.RUnlock()

	for {
		if len(networks) == 0 {
			break
		}

		nid := networks[0]
		networks = networks[1:]

		nDB.RLock()
		nodes := nDB.networkNodes[nid]
		nDB.RUnlock()

		// No peer nodes on this network. Move on.
		if len(nodes) == 0 {
			continue
		}

		completed, err := nDB.bulkSync(nodes, false)
		if err != nil {
			logrus.Errorf("periodic bulk sync failure for network %s: %v", nid, err)
			continue
		}

		// Remove all the networks for which we have
		// successfully completed bulk sync in this iteration.
		updatedNetworks := make([]string, 0, len(networks))
		for _, nid := range networks {
			var found bool
			for _, completedNid := range completed {
				if nid == completedNid {
					found = true
					break
				}
			}

			if !found {
				updatedNetworks = append(updatedNetworks, nid)
			}
		}

		networks = updatedNetworks
	}
}
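
// bulkSync bulk syncs table state with the given candidate nodes. When all
// is false only up to 2 random candidates are tried and the sync stops at
// the first success; when all is true every node in the list is synced. It
// returns the list of networks that were synced.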
func (nDB *NetworkDB) bulkSync(nodes []string, all bool) ([]string, error) {
	if !all {
		// Get 2 random nodes. 2nd node will be tried if the bulk sync to
		// 1st node fails.
		nodes = nDB.mRandomNodes(2, nodes)
	}

	if len(nodes) == 0 {
		return nil, nil
	}

	var err error
	var networks []string
	for _, node := range nodes {
		if node == nDB.config.NodeID {
			continue
		}
		logrus.Debugf("%v(%v): Initiating bulk sync with node %v", nDB.config.Hostname, nDB.config.NodeID, node)
		networks = nDB.findCommonNetworks(node)
		err = nDB.bulkSyncNode(networks, node, true)
		// If it's a periodic bulk sync, stop after the first successful sync.
		if !all && err == nil {
			break
		}
		if err != nil {
			err = fmt.Errorf("bulk sync to node %s failed: %v", node, err)
			logrus.Warn(err.Error())
		}
	}

	if err != nil {
		return nil, err
	}

	return networks, nil
}

// Bulk sync all the table entries belonging to a set of networks to a
// single peer node. It can be unsolicited or can be in response to an
// unsolicited bulk sync.
func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited bool) error {
	var msgs [][]byte

	var unsolMsg string
	if unsolicited {
		unsolMsg = "unsolicited"
	}

	logrus.Debugf("%v(%v): Initiating %s bulk sync for networks %v with node %s",
		nDB.config.Hostname, nDB.config.NodeID, unsolMsg, networks, node)

	nDB.RLock()
	mnode := nDB.nodes[node]
	if mnode == nil {
		nDB.RUnlock()
		return nil
	}

	for _, nid := range networks {
		nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool {
			entry, ok := v.(*entry)
			if !ok {
				return false
			}

			eType := TableEventTypeCreate
			if entry.deleting {
				eType = TableEventTypeDelete
			}

			params := strings.Split(path[1:], "/")
			tEvent := TableEvent{
				Type:      eType,
				LTime:     entry.ltime,
				NodeName:  entry.node,
				NetworkID: nid,
				TableName: params[1],
				Key:       params[2],
				Value:     entry.value,
				// The duration in seconds is a float and is truncated below.
				ResidualReapTime: int32(entry.reapTime.Seconds()),
			}

			msg, err := encodeMessage(MessageTypeTableEvent, &tEvent)
			if err != nil {
				logrus.Errorf("Encode failure during bulk sync: %#v", tEvent)
				return false
			}

			msgs = append(msgs, msg)
			return false
		})
	}
	nDB.RUnlock()

	// Create a compound message
	compound := makeCompoundMessage(msgs)

	bsm := BulkSyncMessage{
		LTime:       nDB.tableClock.Time(),
		Unsolicited: unsolicited,
		NodeName:    nDB.config.NodeID,
		Networks:    networks,
		Payload:     compound,
	}

	buf, err := encodeMessage(MessageTypeBulkSync, &bsm)
	if err != nil {
		return fmt.Errorf("failed to encode bulk sync message: %v", err)
	}

	nDB.Lock()
	ch := make(chan struct{})
	nDB.bulkSyncAckTbl[node] = ch
	nDB.Unlock()

	err = nDB.memberlist.SendReliable(&mnode.Node, buf)
	if err != nil {
		nDB.Lock()
		delete(nDB.bulkSyncAckTbl, node)
		nDB.Unlock()

		return fmt.Errorf("failed to send a TCP message during bulk sync: %v", err)
	}

	// Wait on a response only if it is unsolicited.
	if unsolicited {
		startTime := time.Now()
		t := time.NewTimer(30 * time.Second)
		select {
		case <-t.C:
			logrus.Errorf("Bulk sync to node %s timed out", node)
		case <-ch:
			logrus.Debugf("%v(%v): Bulk sync to node %s took %s", nDB.config.Hostname, nDB.config.NodeID, node, time.Since(startTime))
		}
		t.Stop()
	}

	return nil
}

// randomOffset returns a random offset in the range [0, n).
func randomOffset(n int) int {
	if n == 0 {
		return 0
	}

	val, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
	if err != nil {
		logrus.Errorf("Failed to get a random offset: %v", err)
		return 0
	}

	return int(val.Int64())
}
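
// As an illustrative sketch, gossip above selects its fan-out targets for a
// network with a call equivalent to:
//
//	mNodes := nDB.mRandomNodes(3, nDB.networkNodes[nid])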

// mRandomNodes is used to select up to m random nodes. It is possible
// that fewer than m nodes are returned.
func (nDB *NetworkDB) mRandomNodes(m int, nodes []string) []string {
	n := len(nodes)
	mNodes := make([]string, 0, m)
OUTER:
	// Probe up to 3*n times, with large n this is not necessary
	// since m << n, but with small n we want the search to be
	// exhaustive.
	for i := 0; i < 3*n && len(mNodes) < m; i++ {
		// Get random node
		idx := randomOffset(n)
		node := nodes[idx]

		if node == nDB.config.NodeID {
			continue
		}

		// Check if we have this node already
		for j := 0; j < len(mNodes); j++ {
			if node == mNodes[j] {
				continue OUTER
			}
		}

		// Append the node
		mNodes = append(mNodes, node)
	}

	return mNodes
}