package networkdb

//go:generate protoc -I.:../vendor/github.com/gogo/protobuf --gogo_out=import_path=github.com/docker/docker/libnetwork/networkdb,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. networkdb.proto

import (
	"context"
	"fmt"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/armon/go-radix"
	"github.com/docker/docker/libnetwork/types"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/go-events"
	"github.com/hashicorp/memberlist"
	"github.com/hashicorp/serf/serf"
	"github.com/sirupsen/logrus"
)

const (
	byTable int = 1 + iota
	byNetwork
)

// NetworkDB instance drives the networkdb cluster and acts as the broker
// for cluster-scoped and network-scoped gossip and watches.
type NetworkDB struct {
	// The clocks MUST be the first things
	// in this struct due to Golang issue #599.

	// Global lamport clock for node network attach events.
	networkClock serf.LamportClock

	// Global lamport clock for table events.
	tableClock serf.LamportClock

	sync.RWMutex

	// NetworkDB configuration.
	config *Config

	// All the tree indexes (byTable, byNetwork) that we maintain
	// for the db.
	indexes map[int]*radix.Tree

	// Memberlist we use to drive the cluster.
	memberlist *memberlist.Memberlist

	// List of all peer nodes in the cluster, not limited to any
	// network.
	nodes map[string]*node

	// List of all peer nodes which have failed
	failedNodes map[string]*node

	// List of all peer nodes which have left
	leftNodes map[string]*node

	// A multi-dimensional map of network/node attachments. The
	// first key is a node name and the second key is a network ID
	// for the network that node is participating in.
	networks map[string]map[string]*network

	// A map of nodes which are participating in a given
	// network. The key is a network ID.
	networkNodes map[string][]string

	// A table of ack channels for every node from which we are
	// waiting for an ack.
	bulkSyncAckTbl map[string]chan struct{}

	// Broadcast queue for network event gossip.
	networkBroadcasts *memberlist.TransmitLimitedQueue

	// Broadcast queue for node event gossip.
	nodeBroadcasts *memberlist.TransmitLimitedQueue

	// A central context to stop all goroutines running on
	// behalf of the NetworkDB instance.
	ctx       context.Context
	cancelCtx context.CancelFunc

	// A central broadcaster for all local watchers watching table
	// events.
	broadcaster *events.Broadcaster

	// List of all tickers which need to be stopped when
	// cleaning up.
	tickers []*time.Ticker

	// Reference to the memberlist's keyring to add & remove keys
	keyring *memberlist.Keyring

	// bootStrapIP is the list of IPs that can be used to bootstrap
	// the gossip.
	bootStrapIP []string

	// lastStatsTimestamp is the last timestamp when the stats got printed
	lastStatsTimestamp time.Time

	// lastHealthTimestamp is the last timestamp when the health score got printed
	lastHealthTimestamp time.Time
}

// PeerInfo represents the peer (gossip cluster) nodes of a network
type PeerInfo struct {
	Name string
	IP   string
}

// PeerClusterInfo represents the peer (gossip cluster) nodes
type PeerClusterInfo struct {
	PeerInfo
}

type node struct {
	memberlist.Node
	ltime serf.LamportTime
	// Time left before the reaper removes the node
	reapTime time.Duration
}

// network describes the node/network attachment.
type network struct {
	// Network ID
	id string

	// Lamport time for the latest state of the entry.
	ltime serf.LamportTime

	// Gets set to true after the first bulk sync happens
	inSync bool

	// Node leave is in progress.
	leaving bool

	// Number of seconds still left before a deleted network entry gets
	// removed from networkDB
	reapTime time.Duration

	// The broadcast queue for table event gossip. This is only
	// initialized for this node's network attachment entries.
	tableBroadcasts *memberlist.TransmitLimitedQueue

	// Number of gossip messages sent related to this network during the last stats collection period
	qMessagesSent int

	// Number of entries on the network. This value is the sum of all the entries of all the tables of a specific network.
	// Its use is for statistics purposes. It keeps track of the database size and is printed per network every StatsPrintPeriod
	// interval
	entriesNumber int
}

// Config represents the configuration of the networkdb instance and
// can be passed by the caller.
type Config struct {
	// NodeID is the unique identifier of the node when it is part of the cluster
	NodeID string

	// Hostname is the node hostname.
	Hostname string

	// BindAddr is the IP on which networkdb listens. It can be
	// 0.0.0.0 to listen on all addresses on the host.
	BindAddr string

	// AdvertiseAddr is the node's IP address that we advertise for
	// cluster communication.
	AdvertiseAddr string

	// BindPort is the local node's port to which we bind to for
	// cluster communication.
	BindPort int

	// Keys to be added to the Keyring of the memberlist. Key at index
	// 0 is the primary key
	Keys [][]byte

	// PacketBufferSize is the maximum number of bytes that memberlist will
	// put in a packet (this will be for UDP packets by default with a NetTransport).
	// A safe value for this is typically 1400 bytes (which is the default). However,
	// depending on your network's MTU (Maximum Transmission Unit) you may
	// be able to increase this to get more content into each gossip packet.
	PacketBufferSize int

	// reapEntryInterval is the duration a deleted entry lingers before being garbage collected
	reapEntryInterval time.Duration

	// reapNetworkInterval is the duration a deleted network lingers before being garbage collected
	// NOTE this MUST always be higher than reapEntryInterval
	reapNetworkInterval time.Duration

	// rejoinClusterDuration represents retryJoin timeout used by rejoinClusterBootStrap.
	// Default is 10sec.
	rejoinClusterDuration time.Duration

	// rejoinClusterInterval represents interval on which rejoinClusterBootStrap runs.
	// Default is 60sec.
	rejoinClusterInterval time.Duration

	// StatsPrintPeriod is the period used to print queue stats
	// Default is 5min
	StatsPrintPeriod time.Duration

	// HealthPrintPeriod is the period used to print the health score
	// Default is 1min
	HealthPrintPeriod time.Duration
}

// entry defines a table entry
type entry struct {
	// node from which this entry was learned.
	node string

	// Lamport time for the most recent update to the entry
	ltime serf.LamportTime

	// Opaque value stored in the entry
	value []byte

	// Deleting the entry is in progress. All entries linger in
	// the cluster for a certain amount of time after deletion.
	deleting bool

	// Number of seconds still left before a deleted table entry gets
	// removed from networkDB
	reapTime time.Duration
}

// DefaultConfig returns a NetworkDB config with default values
func DefaultConfig() *Config {
	hostname, _ := os.Hostname()
	return &Config{
		NodeID:                stringid.TruncateID(stringid.GenerateRandomID()),
		Hostname:              hostname,
		BindAddr:              "0.0.0.0",
		PacketBufferSize:      1400,
		StatsPrintPeriod:      5 * time.Minute,
		HealthPrintPeriod:     1 * time.Minute,
		reapEntryInterval:     30 * time.Minute,
		rejoinClusterDuration: 10 * time.Second,
		rejoinClusterInterval: 60 * time.Second,
	}
}
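
// A minimal, illustrative sketch of how a caller might build a configuration:
// start from DefaultConfig and override selected fields before calling New.
// The hostname, addresses, and port below are placeholders, not recommendations.
//
//	conf := DefaultConfig()
//	conf.Hostname = "node-1"
//	conf.AdvertiseAddr = "192.0.2.10"
//	conf.BindAddr = "0.0.0.0"
//	conf.BindPort = 7946
//	nDB, err := New(conf)
//	if err != nil {
//		// handle error
//	}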

// New creates a new instance of NetworkDB using the Config passed by
// the caller.
func New(c *Config) (*NetworkDB, error) {
	// The garbage collection logic for entries leverages the presence of the network.
	// For this reason the expiration time of the network is set slightly higher than the entry expiration so that
	// there are at least 5 extra cycles to make sure that all the entries are properly deleted before deleting the network.
	c.reapNetworkInterval = c.reapEntryInterval + 5*reapPeriod

	nDB := &NetworkDB{
		config:         c,
		indexes:        make(map[int]*radix.Tree),
		networks:       make(map[string]map[string]*network),
		nodes:          make(map[string]*node),
		failedNodes:    make(map[string]*node),
		leftNodes:      make(map[string]*node),
		networkNodes:   make(map[string][]string),
		bulkSyncAckTbl: make(map[string]chan struct{}),
		broadcaster:    events.NewBroadcaster(),
	}

	nDB.indexes[byTable] = radix.New()
	nDB.indexes[byNetwork] = radix.New()

	logrus.Infof("New memberlist node - Node:%v will use memberlist nodeID:%v with config:%+v", c.Hostname, c.NodeID, c)
	if err := nDB.clusterInit(); err != nil {
		return nil, err
	}

	return nDB, nil
}

// Join joins this NetworkDB instance with a list of peer NetworkDB
// instances passed by the caller in the form of addr:port
func (nDB *NetworkDB) Join(members []string) error {
	nDB.Lock()
	nDB.bootStrapIP = append([]string(nil), members...)
	logrus.Infof("The new bootstrap node list is:%v", nDB.bootStrapIP)
	nDB.Unlock()
	return nDB.clusterJoin(members)
}

// Close destroys this NetworkDB instance by leaving the cluster,
// stopping timers, canceling goroutines etc.
func (nDB *NetworkDB) Close() {
	if err := nDB.clusterLeave(); err != nil {
		logrus.Errorf("%v(%v) Could not close DB: %v", nDB.config.Hostname, nDB.config.NodeID, err)
	}

	// Avoid (*Broadcaster).run goroutine leak
	nDB.broadcaster.Close()
}
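
// The following is an illustrative lifecycle sketch, not prescriptive usage:
// it only strings together the exported methods defined in this file. The
// peer address, network ID, table name, and key are placeholders.
//
//	nDB, err := New(DefaultConfig())
//	if err != nil {
//		// handle error
//	}
//	_ = nDB.Join([]string{"192.0.2.11:7946"}) // join an existing gossip cluster
//	_ = nDB.JoinNetwork("net1")               // start gossiping for one network
//	_ = nDB.CreateEntry("some_table", "net1", "k", []byte("v"))
//	_ = nDB.DeleteEntry("some_table", "net1", "k")
//	_ = nDB.LeaveNetwork("net1")
//	nDB.Close()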

// ClusterPeers returns all the gossip cluster peers.
func (nDB *NetworkDB) ClusterPeers() []PeerInfo {
	nDB.RLock()
	defer nDB.RUnlock()
	peers := make([]PeerInfo, 0, len(nDB.nodes))
	for _, node := range nDB.nodes {
		peers = append(peers, PeerInfo{
			Name: node.Name,
			IP:   node.Node.Addr.String(),
		})
	}
	return peers
}

// Peers returns the gossip peers for a given network.
func (nDB *NetworkDB) Peers(nid string) []PeerInfo {
	nDB.RLock()
	defer nDB.RUnlock()
	peers := make([]PeerInfo, 0, len(nDB.networkNodes[nid]))
	for _, nodeName := range nDB.networkNodes[nid] {
		if node, ok := nDB.nodes[nodeName]; ok {
			peers = append(peers, PeerInfo{
				Name: node.Name,
				IP:   node.Addr.String(),
			})
		} else {
			// Added for testing purposes; this condition should never happen.
			// If it does, the network list is out of sync with the node list.
			peers = append(peers, PeerInfo{Name: nodeName, IP: "unknown"})
		}
	}
	return peers
}

// GetEntry retrieves the value of a table entry in a given (network,
// table, key) tuple
func (nDB *NetworkDB) GetEntry(tname, nid, key string) ([]byte, error) {
	nDB.RLock()
	defer nDB.RUnlock()
	entry, err := nDB.getEntry(tname, nid, key)
	if err != nil {
		return nil, err
	}
	if entry != nil && entry.deleting {
		return nil, types.NotFoundErrorf("entry in table %s network id %s and key %s deleted and pending garbage collection", tname, nid, key)
	}

	return entry.value, nil
}

func (nDB *NetworkDB) getEntry(tname, nid, key string) (*entry, error) {
	e, ok := nDB.indexes[byTable].Get(fmt.Sprintf("/%s/%s/%s", tname, nid, key))
	if !ok {
		return nil, types.NotFoundErrorf("could not get entry in table %s with network id %s and key %s", tname, nid, key)
	}

	return e.(*entry), nil
}
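
// Note on the two radix indexes (an observation of the code in this file, not
// an external contract): every entry is stored twice, under mirrored keys.
// The byTable index uses "/<table>/<network>/<key>" while the byNetwork index
// uses "/<network>/<table>/<key>", which is what allows prefix walks either
// per table (WalkTable, GetTableByNetwork) or per network
// (deleteNodeNetworkEntries) over the same data.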

// CreateEntry creates a table entry in NetworkDB for given (network,
// table, key) tuple and if the NetworkDB is part of the cluster
// propagates this event to the cluster. It is an error to create an
// entry for the same tuple for which there is already an existing
// entry unless the current entry is in deleting state.
func (nDB *NetworkDB) CreateEntry(tname, nid, key string, value []byte) error {
	nDB.Lock()
	oldEntry, err := nDB.getEntry(tname, nid, key)
	if err == nil || (oldEntry != nil && !oldEntry.deleting) {
		nDB.Unlock()
		return fmt.Errorf("cannot create entry in table %s with network id %s and key %s, already exists", tname, nid, key)
	}

	entry := &entry{
		ltime: nDB.tableClock.Increment(),
		node:  nDB.config.NodeID,
		value: value,
	}

	nDB.createOrUpdateEntry(nid, tname, key, entry)
	nDB.Unlock()

	if err := nDB.sendTableEvent(TableEventTypeCreate, nid, tname, key, entry); err != nil {
		return fmt.Errorf("cannot send create event for table %s, %v", tname, err)
	}

	return nil
}

// UpdateEntry updates a table entry in NetworkDB for given (network,
// table, key) tuple and if the NetworkDB is part of the cluster
// propagates this event to the cluster. It is an error to update a
// non-existent entry.
func (nDB *NetworkDB) UpdateEntry(tname, nid, key string, value []byte) error {
	nDB.Lock()
	if _, err := nDB.getEntry(tname, nid, key); err != nil {
		nDB.Unlock()
		return fmt.Errorf("cannot update entry as the entry in table %s with network id %s and key %s does not exist", tname, nid, key)
	}

	entry := &entry{
		ltime: nDB.tableClock.Increment(),
		node:  nDB.config.NodeID,
		value: value,
	}

	nDB.createOrUpdateEntry(nid, tname, key, entry)
	nDB.Unlock()

	if err := nDB.sendTableEvent(TableEventTypeUpdate, nid, tname, key, entry); err != nil {
		return fmt.Errorf("cannot send table update event: %v", err)
	}

	return nil
}

// TableElem is a table entry as returned by GetTableByNetwork: the entry's
// value plus the ID of the node that owns it.
type TableElem struct {
	Value []byte
	owner string
}

// GetTableByNetwork walks the networkdb for the given table and network id and
// returns a map of keys and values
func (nDB *NetworkDB) GetTableByNetwork(tname, nid string) map[string]*TableElem {
	entries := make(map[string]*TableElem)
	nDB.indexes[byTable].WalkPrefix(fmt.Sprintf("/%s/%s", tname, nid), func(k string, v interface{}) bool {
		entry := v.(*entry)
		if entry.deleting {
			return false
		}
		key := k[strings.LastIndex(k, "/")+1:]
		entries[key] = &TableElem{Value: entry.value, owner: entry.node}
		return false
	})
	return entries
}
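
// Illustrative only (the table name and network ID are placeholders): reading
// back everything previously written to one table of one network.
//
//	for key, elem := range nDB.GetTableByNetwork("some_table", "net1") {
//		fmt.Printf("%s -> %s\n", key, elem.Value)
//	}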

// DeleteEntry deletes a table entry in NetworkDB for given (network,
// table, key) tuple and if the NetworkDB is part of the cluster
// propagates this event to the cluster.
func (nDB *NetworkDB) DeleteEntry(tname, nid, key string) error {
	nDB.Lock()
	oldEntry, err := nDB.getEntry(tname, nid, key)
	if err != nil || oldEntry == nil || oldEntry.deleting {
		nDB.Unlock()
		return fmt.Errorf("cannot delete entry in table %s with network id %s and key %s: "+
			"it does not exist or is already being deleted", tname, nid, key)
	}

	entry := &entry{
		ltime:    nDB.tableClock.Increment(),
		node:     nDB.config.NodeID,
		value:    oldEntry.value,
		deleting: true,
		reapTime: nDB.config.reapEntryInterval,
	}

	nDB.createOrUpdateEntry(nid, tname, key, entry)
	nDB.Unlock()

	if err := nDB.sendTableEvent(TableEventTypeDelete, nid, tname, key, entry); err != nil {
		return fmt.Errorf("cannot send table delete event: %v", err)
	}

	return nil
}

// deleteNodeFromNetworks removes the deleted node from the participant list of
// every network, and drops its network attachments altogether.
func (nDB *NetworkDB) deleteNodeFromNetworks(deletedNode string) {
	for nid, nodes := range nDB.networkNodes {
		updatedNodes := make([]string, 0, len(nodes))
		for _, node := range nodes {
			if node == deletedNode {
				continue
			}

			updatedNodes = append(updatedNodes, node)
		}

		nDB.networkNodes[nid] = updatedNodes
	}

	delete(nDB.networks, deletedNode)
}

// deleteNodeNetworkEntries is called under two conditions, with two different outcomes:
// 1) when a notification arrives that a node is leaving the network
//   - Walk all the network entries and mark the leaving node's entries for deletion.
//     These will be garbage collected when the reap timer expires.
//
// 2) when the local node is leaving the network
//   - Walk all the network entries:
//     A) if the entry is owned by the local node, mark it for deletion. This ensures
//     that a node which has not yet received the notification that the local node is
//     leaving will still learn of the entries to be deleted.
//     B) if the entry is owned by a remote node, it can safely be deleted. This ensures
//     that if we later rejoin this network we will accept the CREATE events for entries
//     owned by remote nodes and notify the application.
func (nDB *NetworkDB) deleteNodeNetworkEntries(nid, node string) {
	// Indicates if the delete is triggered for the local node
	isNodeLocal := node == nDB.config.NodeID

	nDB.indexes[byNetwork].WalkPrefix("/"+nid,
		func(path string, v interface{}) bool {
			oldEntry := v.(*entry)
			params := strings.Split(path[1:], "/")
			nid := params[0]
			tname := params[1]
			key := params[2]

			// If the entry is owned by a remote node and this node is not leaving the network
			if oldEntry.node != node && !isNodeLocal {
				// Don't do anything because the event is triggered for a node that does not own this entry
				return false
			}

			// If this entry is already marked for deletion and this node is not leaving the network
			if oldEntry.deleting && !isNodeLocal {
				// Don't do anything; this entry will already be garbage collected using the old reapTime
				return false
			}

			entry := &entry{
				ltime:    oldEntry.ltime,
				node:     oldEntry.node,
				value:    oldEntry.value,
				deleting: true,
				reapTime: nDB.config.reapEntryInterval,
			}

			// we arrived at this point in 2 cases:
			// 1) this entry is owned by the node that is leaving the network
			// 2) the local node is leaving the network
			if oldEntry.node == node {
				if isNodeLocal {
					// TODO fcrisciani: this can be removed if there is no way to leave the network
					// without doing a delete of all the objects
					entry.ltime++
				}

				if !oldEntry.deleting {
					nDB.createOrUpdateEntry(nid, tname, key, entry)
				}
			} else {
				// the local node is leaving the network, all the entries of remote nodes can be safely removed
				nDB.deleteEntry(nid, tname, key)
			}

			// Notify the upper layer only of entries not already marked for deletion
			if !oldEntry.deleting {
				nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, entry.value))
			}
			return false
		})
}

// deleteNodeTableEntries deletes every table entry owned by the given node and
// notifies the local watchers.
func (nDB *NetworkDB) deleteNodeTableEntries(node string) {
	nDB.indexes[byTable].Walk(func(path string, v interface{}) bool {
		oldEntry := v.(*entry)
		if oldEntry.node != node {
			return false
		}

		params := strings.Split(path[1:], "/")
		tname := params[0]
		nid := params[1]
		key := params[2]

		nDB.deleteEntry(nid, tname, key)

		if !oldEntry.deleting {
			nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, oldEntry.value))
		}
		return false
	})
}

// WalkTable walks a single table in NetworkDB and invokes the passed
// function for each entry in the table passing the network, key,
// value. The walk stops if the passed function returns true.
func (nDB *NetworkDB) WalkTable(tname string, fn func(string, string, []byte, bool) bool) error {
	nDB.RLock()
	values := make(map[string]interface{})
	nDB.indexes[byTable].WalkPrefix("/"+tname, func(path string, v interface{}) bool {
		values[path] = v
		return false
	})
	nDB.RUnlock()

	for k, v := range values {
		params := strings.Split(k[1:], "/")
		nid := params[1]
		key := params[2]
		if fn(nid, key, v.(*entry).value, v.(*entry).deleting) {
			return nil
		}
	}

	return nil
}
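
// Illustrative only (the table name is a placeholder): the callback receives
// the network ID, the key, the value, and whether the entry is marked for
// deletion; returning true stops the walk early.
//
//	_ = nDB.WalkTable("some_table", func(nid, key string, value []byte, deleting bool) bool {
//		fmt.Printf("%s/%s deleting=%v\n", nid, key, deleting)
//		return false // keep walking
//	})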

// JoinNetwork joins this node to a given network and propagates this
// event across the cluster. This triggers this node joining the
// sub-cluster of this network and participating in the network-scoped
// gossip and bulk sync for this network.
func (nDB *NetworkDB) JoinNetwork(nid string) error {
	ltime := nDB.networkClock.Increment()

	nDB.Lock()
	nodeNetworks, ok := nDB.networks[nDB.config.NodeID]
	if !ok {
		nodeNetworks = make(map[string]*network)
		nDB.networks[nDB.config.NodeID] = nodeNetworks
	}
	n, ok := nodeNetworks[nid]
	var entries int
	if ok {
		entries = n.entriesNumber
	}
	nodeNetworks[nid] = &network{id: nid, ltime: ltime, entriesNumber: entries}
	nodeNetworks[nid].tableBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			// TODO fcrisciani: this can be optimized, maybe avoiding the lock?
			// This is invoked on every GetBroadcasts call to evaluate the number of
			// replicas for the message
			nDB.RLock()
			defer nDB.RUnlock()
			return len(nDB.networkNodes[nid])
		},
		RetransmitMult: 4,
	}
	nDB.addNetworkNode(nid, nDB.config.NodeID)
	networkNodes := nDB.networkNodes[nid]
	n = nodeNetworks[nid]
	nDB.Unlock()

	if err := nDB.sendNetworkEvent(nid, NetworkEventTypeJoin, ltime); err != nil {
		return fmt.Errorf("failed to send join network event for %s: %v", nid, err)
	}

	logrus.Debugf("%v(%v): joined network %s", nDB.config.Hostname, nDB.config.NodeID, nid)
	if _, err := nDB.bulkSync(networkNodes, true); err != nil {
		logrus.Errorf("Error bulk syncing while joining network %s: %v", nid, err)
	}

	// Mark the network as being synced
	// note this is a best effort, we are not checking the result of the bulk sync
	nDB.Lock()
	n.inSync = true
	nDB.Unlock()

	return nil
}

// LeaveNetwork leaves this node from a given network and propagates
// this event across the cluster. This triggers this node leaving the
// sub-cluster of this network and as a result it will no longer
// participate in the network-scoped gossip and bulk sync for this
// network. It also removes all the table entries for this network from
// networkdb.
func (nDB *NetworkDB) LeaveNetwork(nid string) error {
	ltime := nDB.networkClock.Increment()
	if err := nDB.sendNetworkEvent(nid, NetworkEventTypeLeave, ltime); err != nil {
		return fmt.Errorf("failed to send leave network event for %s: %v", nid, err)
	}

	nDB.Lock()
	defer nDB.Unlock()

	// Remove myself from the list of the nodes participating in the network
	nDB.deleteNetworkNode(nid, nDB.config.NodeID)

	// Update all the local entries marking them for deletion and delete all the remote entries
	nDB.deleteNodeNetworkEntries(nid, nDB.config.NodeID)

	nodeNetworks, ok := nDB.networks[nDB.config.NodeID]
	if !ok {
		return fmt.Errorf("could not find self node for network %s while trying to leave", nid)
	}

	n, ok := nodeNetworks[nid]
	if !ok {
		return fmt.Errorf("could not find network %s while trying to leave", nid)
	}

	logrus.Debugf("%v(%v): leaving network %s", nDB.config.Hostname, nDB.config.NodeID, nid)
	n.ltime = ltime
	n.reapTime = nDB.config.reapNetworkInterval
	n.leaving = true
	return nil
}

// addNetworkNode adds the node to the list of nodes which participate
// in the passed network only if it is not already present. Caller
// should hold the NetworkDB lock while calling this
func (nDB *NetworkDB) addNetworkNode(nid string, nodeName string) {
	nodes := nDB.networkNodes[nid]
	for _, node := range nodes {
		if node == nodeName {
			return
		}
	}

	nDB.networkNodes[nid] = append(nDB.networkNodes[nid], nodeName)
}

// deleteNetworkNode deletes the node from the list of nodes which
// participate in the passed network. Caller should hold the NetworkDB
// lock while calling this
func (nDB *NetworkDB) deleteNetworkNode(nid string, nodeName string) {
	nodes, ok := nDB.networkNodes[nid]
	if !ok || len(nodes) == 0 {
		return
	}
	newNodes := make([]string, 0, len(nodes)-1)
	for _, name := range nodes {
		if name == nodeName {
			continue
		}
		newNodes = append(newNodes, name)
	}
	nDB.networkNodes[nid] = newNodes
}

// findCommonNetworks finds the networks that both this node and the
// passed node have joined.
func (nDB *NetworkDB) findCommonNetworks(nodeName string) []string {
	nDB.RLock()
	defer nDB.RUnlock()

	var networks []string
	for nid := range nDB.networks[nDB.config.NodeID] {
		if n, ok := nDB.networks[nodeName][nid]; ok {
			if !n.leaving {
				networks = append(networks, nid)
			}
		}
	}

	return networks
}

// updateLocalNetworkTime bumps the network lamport clock and stamps all of the
// local node's network attachments with the new time.
func (nDB *NetworkDB) updateLocalNetworkTime() {
	nDB.Lock()
	defer nDB.Unlock()

	ltime := nDB.networkClock.Increment()
	for _, n := range nDB.networks[nDB.config.NodeID] {
		n.ltime = ltime
	}
}

// createOrUpdateEntry handles the creation or update of entries in the local
// tree store. It is also used to keep the entry count of the network in sync (all tables are aggregated)
func (nDB *NetworkDB) createOrUpdateEntry(nid, tname, key string, entry interface{}) (bool, bool) {
	_, okTable := nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
	_, okNetwork := nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
	if !okNetwork {
		// Add only if it is an insert, not an update
		n, ok := nDB.networks[nDB.config.NodeID][nid]
		if ok {
			n.entriesNumber++
		}
	}
	return okTable, okNetwork
}

// deleteEntry handles the deletion of entries from the local tree store.
// It is also used to keep the entry count of the network in sync (all tables are aggregated)
func (nDB *NetworkDB) deleteEntry(nid, tname, key string) (bool, bool) {
	_, okTable := nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key))
	_, okNetwork := nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key))
	if okNetwork {
		// Remove only if the delete is successful
		n, ok := nDB.networks[nDB.config.NodeID][nid]
		if ok {
			n.entriesNumber--
		}
	}
	return okTable, okNetwork
}