Merge pull request #34378 from fcrisciani/ln-vendoring

Vendoring libnetwork

Commit: aaee3ca6c1
25 changed files with 727 additions and 236 deletions
@@ -27,7 +27,7 @@ github.com/imdario/mergo 0.2.1
 golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0

 #get libnetwork packages
-github.com/docker/libnetwork 248fd5ea6a67f8810da322e6e7441e8de96a9045 https://github.com/dmcgowan/libnetwork.git
+github.com/docker/libnetwork 24bb72a8dcfe0b58958414890c8f4138b644b96a
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
vendor/github.com/docker/libnetwork/agent.go (generated, vendored, 7 changes)
@@ -741,11 +741,12 @@ func (n *network) addDriverWatches() {
 		return
 	}

-	agent.networkDB.WalkTable(table.name, func(nid, key string, value []byte) bool {
-		if nid == n.ID() {
+	agent.networkDB.WalkTable(table.name, func(nid, key string, value []byte, deleted bool) bool {
+		// skip the entries that are marked for deletion; this is safe because this function
+		// is called at initialization time, so there is no state to delete
+		if nid == n.ID() && !deleted {
 			d.EventNotify(driverapi.Create, nid, table.name, key, value)
 		}

 		return false
 	})
 }
vendor/github.com/docker/libnetwork/bitseq/sequence.go (generated, vendored, 8 changes)
@@ -497,7 +497,10 @@ func getFirstAvailable(head *sequence, start uint64) (uint64, uint64, error) {
 	// Derive this sequence's offsets
 	byteOffset := byteStart - inBlockBytePos
 	bitOffset := inBlockBytePos*8 + bitStart
+	var firstOffset uint64
+	if current == head {
+		firstOffset = byteOffset
+	}
 	for current != nil {
 		if current.block != blockMAX {
 			bytePos, bitPos, err := current.getAvailableBit(bitOffset)
@@ -505,7 +508,8 @@ func getFirstAvailable(head *sequence, start uint64) (uint64, uint64, error) {
 			}
 			// Moving to next block: Reset bit offset.
 			bitOffset = 0
-			byteOffset += current.count * blockBytes
+			byteOffset += (current.count * blockBytes) - firstOffset
+			firstOffset = 0
 			current = current.next
 		}
 	}
 	return invalidPos, invalidPos, ErrNoBitAvailable
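The subtraction is the substance of this fix: when the scan starts partway into the head sequence, byteOffset already counts the bytes preceding the start position, so stepping past that sequence must not count them twice. A worked sketch with made-up numbers (not taken from the patch):

// Suppose blockBytes = 4 and the head sequence spans count = 8 blocks (32 bytes),
// with the search starting 12 bytes in (byteStart = 12, inBlockBytePos = 0):
//
//     byteOffset  = 12 - 0 = 12  // bytes already consumed inside the head sequence
//     firstOffset = 12           // recorded only because current == head
//
// If the head holds no free bit, the old code advanced with byteOffset += 8*4,
// giving 12 + 32 = 44 instead of 32, the true offset of the next sequence.
// The fixed code advances with:
//
//     byteOffset += (8 * 4) - firstOffset  // 12 + 32 - 12 = 32
//     firstOffset = 0                      // later sequences are walked from their start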
vendor/github.com/docker/libnetwork/common/caller.go (generated, vendored, new file, 29 lines)
@@ -0,0 +1,29 @@
+package common
+
+import (
+	"runtime"
+	"strings"
+)
+
+func callerInfo(i int) string {
+	ptr, _, _, ok := runtime.Caller(i)
+	fName := "unknown"
+	if ok {
+		f := runtime.FuncForPC(ptr)
+		if f != nil {
+			// f.Name() is like: github.com/docker/libnetwork/common.MethodName
+			tmp := strings.Split(f.Name(), ".")
+			if len(tmp) > 0 {
+				fName = tmp[len(tmp)-1]
+			}
+		}
+	}
+	return fName
+}
+
+// CallerName returns the name of the function at the specified level
+// level == 0 means current method name
+func CallerName(level int) string {
+	return callerInfo(2 + level)
+}
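A minimal usage sketch (hypothetical main package; only the import path comes from the file above). CallerName skips the two frames of callerInfo and CallerName themselves, so level 0 resolves to the immediate caller:

package main

import (
	"fmt"

	"github.com/docker/libnetwork/common"
)

func traced() {
	fmt.Println("called from:", common.CallerName(0)) // prints: called from: traced
}

func main() {
	traced()
}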
vendor/github.com/docker/libnetwork/diagnose/diagnose.go (generated, vendored, new file, 133 lines)
@@ -0,0 +1,133 @@
+package diagnose
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"sync"
+
+	"github.com/sirupsen/logrus"
+)
+
+// HTTPHandlerFunc TODO
+type HTTPHandlerFunc func(interface{}, http.ResponseWriter, *http.Request)
+
+type httpHandlerCustom struct {
+	ctx interface{}
+	F   func(interface{}, http.ResponseWriter, *http.Request)
+}
+
+// ServeHTTP TODO
+func (h httpHandlerCustom) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	h.F(h.ctx, w, r)
+}
+
+var diagPaths2Func = map[string]HTTPHandlerFunc{
+	"/":      notImplemented,
+	"/help":  help,
+	"/ready": ready,
+}
+
+// Server, when debug is enabled, exposes a TCP endpoint for debug commands.
+// This data structure is protected by the Agent mutex so it does not require an additional mutex here.
+type Server struct {
+	sk                net.Listener
+	port              int
+	mux               *http.ServeMux
+	registeredHanders []string
+	sync.Mutex
+}
+
+// Init TODO
+func (n *Server) Init() {
+	n.mux = http.NewServeMux()
+
+	// Register local handlers
+	n.RegisterHandler(n, diagPaths2Func)
+}
+
+// RegisterHandler TODO
+func (n *Server) RegisterHandler(ctx interface{}, hdlrs map[string]HTTPHandlerFunc) {
+	n.Lock()
+	defer n.Unlock()
+	for path, fun := range hdlrs {
+		n.mux.Handle(path, httpHandlerCustom{ctx, fun})
+		n.registeredHanders = append(n.registeredHanders, path)
+	}
+}
+
+// EnableDebug opens a TCP socket to debug the passed network DB
+func (n *Server) EnableDebug(ip string, port int) {
+	n.Lock()
+	defer n.Unlock()
+
+	n.port = port
+	logrus.SetLevel(logrus.DebugLevel)
+
+	if n.sk != nil {
+		logrus.Infof("The server is already up and running")
+		return
+	}
+
+	logrus.Infof("Starting the server listening on %d for commands", port)
+
+	// // Create the socket
+	// var err error
+	// n.sk, err = net.Listen("tcp", listeningAddr)
+	// if err != nil {
+	// 	log.Fatal(err)
+	// }
+	//
+	// go func() {
+	// 	http.Serve(n.sk, n.mux)
+	// }()
+	http.ListenAndServe(":8000", n.mux)
+}
+
+// DisableDebug stops the debug and closes the tcp socket
+func (n *Server) DisableDebug() {
+	n.Lock()
+	defer n.Unlock()
+	n.sk.Close()
+	n.sk = nil
+}
+
+// IsDebugEnable returns true when the debug is enabled
+func (n *Server) IsDebugEnable() bool {
+	n.Lock()
+	defer n.Unlock()
+	return n.sk != nil
+}
+
+func notImplemented(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+	fmt.Fprintf(w, "URL path: %s no method implemented check /help\n", r.URL.Path)
+}
+
+func help(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+	n, ok := ctx.(*Server)
+	if ok {
+		for _, path := range n.registeredHanders {
+			fmt.Fprintf(w, "%s\n", path)
+		}
+	}
+}
+
+func ready(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+	fmt.Fprintf(w, "OK\n")
+}
+
+// DebugHTTPForm TODO
+func DebugHTTPForm(r *http.Request) {
+	r.ParseForm()
+	for k, v := range r.Form {
+		logrus.Debugf("Form[%q] = %q\n", k, v)
+	}
+}
+
+// HTTPReplyError TODO
+func HTTPReplyError(w http.ResponseWriter, message, usage string) {
+	fmt.Fprintf(w, "%s\n", message)
+	if usage != "" {
+		fmt.Fprintf(w, "Usage: %s\n", usage)
+	}
+}
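A usage sketch (hypothetical wiring, not part of the patch): initialize the Server, register a custom path, and start serving. Note that EnableDebug as vendored here records but otherwise ignores its ip/port arguments and blocks serving on :8000:

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/libnetwork/diagnose"
)

func main() {
	srv := &diagnose.Server{}
	srv.Init() // registers "/", "/help" and "/ready"

	// ctx is handed back to the handler on every request.
	srv.RegisterHandler("my-context", map[string]diagnose.HTTPHandlerFunc{
		"/hello": func(ctx interface{}, w http.ResponseWriter, r *http.Request) {
			fmt.Fprintf(w, "hello from %v\n", ctx)
		},
	})

	srv.EnableDebug("127.0.0.1", 8000) // blocks in http.ListenAndServe(":8000", ...)
}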
vendor/github.com/docker/libnetwork/drivers/overlay/joinleave.go (generated, vendored, 5 changes)
@@ -120,8 +120,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
 		}
 	}

-	d.peerDbAdd(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac,
-		net.ParseIP(d.advertiseAddress), true)
+	d.peerAdd(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), true, false, false, true)

 	if err := d.checkEncryption(nid, nil, n.vxlanID(s), true, true); err != nil {
 		logrus.Warn(err)
@@ -205,7 +204,7 @@ func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key stri
 		return
 	}

-	d.peerAdd(nid, eid, addr.IP, addr.Mask, mac, vtep, true, false, false)
+	d.peerAdd(nid, eid, addr.IP, addr.Mask, mac, vtep, true, false, false, false)
 }

 // Leave method is invoked when a Sandbox detaches from an endpoint.
vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go (generated, vendored, 9 changes)
@@ -683,10 +683,12 @@ func (n *network) initSandbox(restore bool) error {
 		return fmt.Errorf("could not get network sandbox (oper %t): %v", restore, err)
 	}

+	// this is needed to let the peerAdd configure the sandbox
 	n.setSandbox(sbox)

 	if !restore {
-		n.driver.peerDbUpdateSandbox(n.id)
+		// Initialize the sandbox with all the peers previously received from networkdb
+		n.driver.initSandboxPeerDB(n.id)
 	}

 	var nlSock *nl.NetlinkSocket
@@ -765,10 +767,7 @@ func (n *network) watchMiss(nlSock *nl.NetlinkSocket) {
 				logrus.Errorf("could not resolve peer %q: %v", ip, err)
 				continue
 			}
-			if err := n.driver.peerAdd(n.id, "dummy", ip, IPmask, mac, vtep, true, l2Miss, l3Miss); err != nil {
-				logrus.Errorf("could not add neighbor entry for missed peer %q: %v", ip, err)
-			}
+			n.driver.peerAdd(n.id, "dummy", ip, IPmask, mac, vtep, true, l2Miss, l3Miss, false)
 		} else {
 			// If the gc_thresh values are lower the kernel might knock off the neighbor entries.
 			// When we get an L3 miss, check if it is a valid peer and reprogram the neighbor
vendor/github.com/docker/libnetwork/drivers/overlay/ov_serf.go (generated, vendored, 11 changes)
@@ -120,15 +120,10 @@ func (d *driver) processEvent(u serf.UserEvent) {

 	switch action {
 	case "join":
-		if err := d.peerAdd(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac,
-			net.ParseIP(vtepStr), true, false, false); err != nil {
-			logrus.Errorf("Peer add failed in the driver: %v\n", err)
-		}
+		d.peerAdd(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac, net.ParseIP(vtepStr),
+			true, false, false, false)
 	case "leave":
-		if err := d.peerDelete(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac,
-			net.ParseIP(vtepStr), true); err != nil {
-			logrus.Errorf("Peer delete failed in the driver: %v\n", err)
-		}
+		d.peerDelete(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac, net.ParseIP(vtepStr), true)
 	}
 }
vendor/github.com/docker/libnetwork/drivers/overlay/overlay.go (generated, vendored, 20 changes)
@@ -3,6 +3,7 @@ package overlay
 //go:generate protoc -I.:../../Godeps/_workspace/src/github.com/gogo/protobuf --gogo_out=import_path=github.com/docker/libnetwork/drivers/overlay,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. overlay.proto

 import (
+	"context"
 	"fmt"
 	"net"
 	"sync"
@@ -50,6 +51,8 @@ type driver struct {
 	joinOnce      sync.Once
 	localJoinOnce sync.Once
 	keys          []*key
+	peerOpCh      chan *peerOperation
+	peerOpCancel  context.CancelFunc
 	sync.Mutex
 }

@@ -64,10 +67,16 @@ func Init(dc driverapi.DriverCallback, config map[string]interface{}) error {
 		peerDb: peerNetworkMap{
 			mp: map[string]*peerMap{},
 		},
 		secMap: &encrMap{nodes: map[string][]*spi{}},
 		config: config,
+		peerOpCh: make(chan *peerOperation),
 	}

+	// Launch the go routine for processing peer operations
+	ctx, cancel := context.WithCancel(context.Background())
+	d.peerOpCancel = cancel
+	go d.peerOpRoutine(ctx, d.peerOpCh)
+
 	if data, ok := config[netlabel.GlobalKVClient]; ok {
 		var err error
 		dsc, ok := data.(discoverapi.DatastoreConfigData)
@@ -161,7 +170,7 @@ func (d *driver) restoreEndpoints() error {
 		}

 		n.incEndpointCount()
-		d.peerDbAdd(ep.nid, ep.id, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), true)
+		d.peerAdd(ep.nid, ep.id, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), true, false, false, true)
 	}
 	return nil
 }
@@ -170,6 +179,11 @@ func (d *driver) restoreEndpoints() error {
 func Fini(drv driverapi.Driver) {
 	d := drv.(*driver)

+	// Notify the peer go routine to return
+	if d.peerOpCancel != nil {
+		d.peerOpCancel()
+	}
+
 	if d.exitCh != nil {
 		waitCh := make(chan struct{})
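The shape introduced here — a single goroutine that owns all peer mutations, fed through an unbuffered channel and stopped through context cancellation in Fini — can be sketched in isolation (hypothetical names, not the vendored code):

package main

import (
	"context"
	"fmt"
)

type operation struct{ desc string }

func main() {
	ch := make(chan *operation)
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})

	// Single consumer: operations are applied strictly in arrival order,
	// so no extra synchronization is needed on the state it owns.
	go func() {
		defer close(done)
		for {
			select {
			case <-ctx.Done():
				return // the Fini path: cancellation stops the routine
			case op := <-ch:
				fmt.Println("applied:", op.desc)
			}
		}
	}()

	ch <- &operation{desc: "peer add"}
	ch <- &operation{desc: "peer delete"}
	cancel()
	<-done
}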
vendor/github.com/docker/libnetwork/drivers/overlay/peerdb.go (generated, vendored, 155 changes)
@@ -1,11 +1,13 @@
 package overlay

 import (
+	"context"
 	"fmt"
 	"net"
 	"sync"
 	"syscall"

+	"github.com/docker/libnetwork/common"
 	"github.com/sirupsen/logrus"
 )

@@ -59,8 +61,6 @@ func (pKey *peerKey) Scan(state fmt.ScanState, verb rune) error {
 	return nil
 }

-var peerDbWg sync.WaitGroup
-
 func (d *driver) peerDbWalk(f func(string, *peerKey, *peerEntry) bool) error {
 	d.peerDb.Lock()
 	nids := []string{}
@@ -141,8 +141,6 @@ func (d *driver) peerDbSearch(nid string, peerIP net.IP) (net.HardwareAddr, net.
 func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
 	peerMac net.HardwareAddr, vtep net.IP, isLocal bool) {

-	peerDbWg.Wait()
-
 	d.peerDb.Lock()
 	pMap, ok := d.peerDb.mp[nid]
 	if !ok {
@@ -173,7 +171,6 @@ func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask
 func (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
 	peerMac net.HardwareAddr, vtep net.IP) peerEntry {
-	peerDbWg.Wait()

 	d.peerDb.Lock()
 	pMap, ok := d.peerDb.mp[nid]
@@ -206,55 +203,109 @@ func (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPM
 	return pEntry
 }

-func (d *driver) peerDbUpdateSandbox(nid string) {
-	d.peerDb.Lock()
-	pMap, ok := d.peerDb.mp[nid]
-	if !ok {
-		d.peerDb.Unlock()
-		return
-	}
-	d.peerDb.Unlock()
-
-	peerDbWg.Add(1)
-
-	var peerOps []func()
-	pMap.Lock()
-	for pKeyStr, pEntry := range pMap.mp {
-		var pKey peerKey
-		if _, err := fmt.Sscan(pKeyStr, &pKey); err != nil {
-			logrus.Errorf("peer key scan failed: %v", err)
-		}
-
-		if pEntry.isLocal {
-			continue
-		}
-
-		// Go captures variables by reference. The pEntry could be
-		// pointing to the same memory location for every iteration. Make
-		// a copy of pEntry before capturing it in the following closure.
-		entry := pEntry
-		op := func() {
-			if err := d.peerAdd(nid, entry.eid, pKey.peerIP, entry.peerIPMask,
-				pKey.peerMac, entry.vtep,
-				false, false, false); err != nil {
-				logrus.Errorf("peerdbupdate in sandbox failed for ip %s and mac %s: %v",
-					pKey.peerIP, pKey.peerMac, err)
-			}
-		}
-
-		peerOps = append(peerOps, op)
-	}
-	pMap.Unlock()
-
-	for _, op := range peerOps {
-		op()
-	}
-
-	peerDbWg.Done()
-}
+// The overlay uses a lazy initialization approach: when a network is created
+// and the driver registered, the overlay does not allocate resources until the
+// moment a sandbox is actually created.
+// At the moment of this call, which happens when a sandbox is initialized, it is
+// possible that networkDB has already delivered events for peers available on
+// remote nodes; those peers are saved into the peerDB, and this function is used
+// to properly configure the network sandbox with all the peers previously notified.
+// Note also that this method sends a single message on the channel, and the go
+// routine on the other side will atomically loop on the whole table of peers and
+// program their state in one single atomic operation. This is fundamental to
+// guarantee consistency, and avoids new peerAdd or peerDelete operations being
+// reordered during the sandbox init.
+func (d *driver) initSandboxPeerDB(nid string) {
+	d.peerInit(nid)
+}
+
+type peerOperationType int32
+
+const (
+	peerOperationINIT peerOperationType = iota
+	peerOperationADD
+	peerOperationDELETE
+)
+
+type peerOperation struct {
+	opType     peerOperationType
+	networkID  string
+	endpointID string
+	peerIP     net.IP
+	peerIPMask net.IPMask
+	peerMac    net.HardwareAddr
+	vtepIP     net.IP
+	updateDB   bool
+	l2Miss     bool
+	l3Miss     bool
+	localPeer  bool
+	callerName string
+}
+
+func (d *driver) peerOpRoutine(ctx context.Context, ch chan *peerOperation) {
+	var err error
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case op := <-ch:
+			switch op.opType {
+			case peerOperationINIT:
+				err = d.peerInitOp(op.networkID)
+			case peerOperationADD:
+				err = d.peerAddOp(op.networkID, op.endpointID, op.peerIP, op.peerIPMask, op.peerMac, op.vtepIP, op.updateDB, op.l2Miss, op.l3Miss, op.localPeer)
+			case peerOperationDELETE:
+				err = d.peerDeleteOp(op.networkID, op.endpointID, op.peerIP, op.peerIPMask, op.peerMac, op.vtepIP, op.localPeer)
+			}
+			if err != nil {
+				logrus.Warnf("Peer operation failed:%s op:%v", err, op)
+			}
+		}
+	}
+}
+
+func (d *driver) peerInit(nid string) {
+	callerName := common.CallerName(1)
+	d.peerOpCh <- &peerOperation{
+		opType:     peerOperationINIT,
+		networkID:  nid,
+		callerName: callerName,
+	}
+}
+
+func (d *driver) peerInitOp(nid string) error {
+	return d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
+		// Local entries do not need to be added
+		if pEntry.isLocal {
+			return false
+		}
+
+		d.peerAddOp(nid, pEntry.eid, pKey.peerIP, pEntry.peerIPMask, pKey.peerMac, pEntry.vtep, false, false, false, false)
+		// return false to loop on all entries
+		return false
+	})
+}

 func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
-	peerMac net.HardwareAddr, vtep net.IP, updateDb, l2Miss, l3Miss bool) error {
+	peerMac net.HardwareAddr, vtep net.IP, updateDb, l2Miss, l3Miss, localPeer bool) {
+	callerName := common.CallerName(1)
+	d.peerOpCh <- &peerOperation{
+		opType:     peerOperationADD,
+		networkID:  nid,
+		endpointID: eid,
+		peerIP:     peerIP,
+		peerIPMask: peerIPMask,
+		peerMac:    peerMac,
+		vtepIP:     vtep,
+		updateDB:   updateDb,
+		l2Miss:     l2Miss,
+		l3Miss:     l3Miss,
+		localPeer:  localPeer,
+		callerName: callerName,
+	}
+}
+
+func (d *driver) peerAddOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
+	peerMac net.HardwareAddr, vtep net.IP, updateDb, l2Miss, l3Miss, updateOnlyDB bool) error {

 	if err := validateID(nid, eid); err != nil {
 		return err
@@ -262,6 +313,9 @@ func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,

 	if updateDb {
 		d.peerDbAdd(nid, eid, peerIP, peerIPMask, peerMac, vtep, false)
+		if updateOnlyDB {
+			return nil
+		}
 	}

 	n := d.network(nid)
@@ -271,6 +325,9 @@ func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,

 	sbox := n.sandbox()
 	if sbox == nil {
+		// We hit this case for all the events that arrive before the sandbox
+		// is created. The peer has already been added into the database, and the
+		// sandbox init will configure all these peers from the database
 		return nil
 	}

@@ -311,6 +368,22 @@ func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
 }

 func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
+	peerMac net.HardwareAddr, vtep net.IP, updateDb bool) {
+	callerName := common.CallerName(1)
+	d.peerOpCh <- &peerOperation{
+		opType:     peerOperationDELETE,
+		networkID:  nid,
+		endpointID: eid,
+		peerIP:     peerIP,
+		peerIPMask: peerIPMask,
+		peerMac:    peerMac,
+		vtepIP:     vtep,
+		updateDB:   updateDb,
+		callerName: callerName,
+	}
+}
+
+func (d *driver) peerDeleteOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
 	peerMac net.HardwareAddr, vtep net.IP, updateDb bool) error {

 	if err := validateID(nid, eid); err != nil {
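Serializing every mutation through peerOpCh is what makes the peerDbWg WaitGroup removed above unnecessary. The deleted peerDbUpdateSandbox also carried a reminder worth keeping: Go closures capture variables, not values, so the loop had to copy pEntry before capturing it. A standalone illustration of that pitfall (hypothetical example; Go 1.22 later changed loop variables to be per-iteration):

package main

import "fmt"

func main() {
	entries := []string{"a", "b", "c"}
	var ops []func()
	for _, e := range entries {
		e := e // copy; without this, every closure shares one loop variable on pre-1.22 Go
		ops = append(ops, func() { fmt.Println(e) })
	}
	for _, op := range ops {
		op() // prints a, b, c; without the copy it would print c, c, c on pre-1.22 Go
	}
}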
vendor/github.com/docker/libnetwork/endpoint_info.go (generated, vendored, 4 changes)
@@ -413,7 +413,7 @@ func (epj *endpointJoinInfo) UnmarshalJSON(b []byte) error {
 		return err
 	}
 	if v, ok := epMap["gw"]; ok {
-		epj.gw6 = net.ParseIP(v.(string))
+		epj.gw = net.ParseIP(v.(string))
 	}
 	if v, ok := epMap["gw6"]; ok {
 		epj.gw6 = net.ParseIP(v.(string))
@@ -442,6 +442,6 @@ func (epj *endpointJoinInfo) CopyTo(dstEpj *endpointJoinInfo) error {
 	dstEpj.driverTableEntries = make([]*tableEntry, len(epj.driverTableEntries))
 	copy(dstEpj.driverTableEntries, epj.driverTableEntries)
 	dstEpj.gw = types.GetIPCopy(epj.gw)
-	dstEpj.gw = types.GetIPCopy(epj.gw6)
+	dstEpj.gw6 = types.GetIPCopy(epj.gw6)
 	return nil
 }
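A regression-test sketch for the two copy-paste fixes (hypothetical test, not part of the patch; it assumes placement inside the libnetwork package, where the unexported type is visible):

package libnetwork

import (
	"net"
	"testing"
)

func TestCopyToKeepsGatewaysSeparate(t *testing.T) {
	src := &endpointJoinInfo{
		gw:  net.ParseIP("10.0.0.1"),
		gw6: net.ParseIP("fd00::1"),
	}
	dst := &endpointJoinInfo{}
	if err := src.CopyTo(dst); err != nil {
		t.Fatal(err)
	}
	// Before the fix, dst.gw was overwritten with the IPv6 gateway and dst.gw6 stayed nil.
	if !dst.gw.Equal(src.gw) || !dst.gw6.Equal(src.gw6) {
		t.Fatalf("gw=%v gw6=%v", dst.gw, dst.gw6)
	}
}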
vendor/github.com/docker/libnetwork/networkdb/broadcast.go (generated, vendored, 3 changes)
@@ -114,7 +114,8 @@ type tableEventMessage struct {
 }

 func (m *tableEventMessage) Invalidates(other memberlist.Broadcast) bool {
-	return false
+	otherm := other.(*tableEventMessage)
+	return m.tname == otherm.tname && m.id == otherm.id && m.key == otherm.key
 }

 func (m *tableEventMessage) Message() []byte {
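For context, tableEventMessage implements the Broadcast interface from github.com/hashicorp/memberlist; the gossip queue calls Invalidates when a new broadcast is enqueued and drops the older one if it returns true. With this change, a fresher event for the same table, network, and key supersedes a stale queued event instead of both being transmitted. The interface being satisfied, for reference:

type Broadcast interface {
	// Invalidates checks if enqueuing the current broadcast
	// invalidates a previous broadcast
	Invalidates(b Broadcast) bool

	// Returns a byte form of the message
	Message() []byte

	// Finished is invoked when the message will no longer
	// be broadcast, either due to invalidation or the
	// transmit limit being reached
	Finished()
}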
vendor/github.com/docker/libnetwork/networkdb/cluster.go (generated, vendored, 7 changes)
@@ -290,13 +290,6 @@ func (nDB *NetworkDB) reconnectNode() {
 		return
 	}

-	// Update all the local table state to a new time to
-	// force update on the node we are trying to rejoin, just in
-	// case that node has these in deleting state still. This is
-	// to facilitate fast convergence after recovering from a gossip
-	// failure.
-	nDB.updateLocalTableTime()
-
 	logrus.Debugf("Initiating bulk sync with node %s after reconnect", node.Name)
 	nDB.bulkSync([]string{node.Name}, true)
 }
vendor/github.com/docker/libnetwork/networkdb/delegate.go (generated, vendored, 52 changes)
@@ -104,6 +104,9 @@ func (nDB *NetworkDB) handleNodeEvent(nEvent *NodeEvent) bool {
 	}

 	n = nDB.checkAndGetNode(nEvent)
+	if n == nil {
+		return false
+	}

 	nDB.purgeSameNode(n)
 	n.ltime = nEvent.LTime
@@ -130,25 +133,12 @@
 }

 func (nDB *NetworkDB) handleNetworkEvent(nEvent *NetworkEvent) bool {
-	var flushEntries bool
 	// Update our local clock if the received message has a newer time.
 	nDB.networkClock.Witness(nEvent.LTime)

 	nDB.Lock()
-	defer func() {
-		nDB.Unlock()
-		// When a node leaves a network on the last task removal cleanup the
-		// local entries for this network & node combination. When the tasks
-		// on a network are removed we could have missed the gossip updates.
-		// Not doing this cleanup can leave stale entries because bulksyncs
-		// from the node will no longer include this network state.
-		//
-		// deleteNodeNetworkEntries takes nDB lock.
-		if flushEntries {
-			nDB.deleteNodeNetworkEntries(nEvent.NetworkID, nEvent.NodeName)
-		}
-	}()
+	defer nDB.Unlock()

 	if nEvent.NodeName == nDB.config.NodeName {
 		return false
@@ -176,7 +166,12 @@ func (nDB *NetworkDB) handleNetworkEvent(nEvent *NetworkEvent) bool {
 	n.leaving = nEvent.Type == NetworkEventTypeLeave
 	if n.leaving {
 		n.reapTime = reapInterval
-		flushEntries = true
+
+		// The remote node is leaving the network, but not the gossip cluster.
+		// Mark all its entries in deleted state; this guarantees that if some
+		// node bulk syncs with us, the deleted state of these entries will be
+		// propagated.
+		nDB.deleteNodeNetworkEntries(nEvent.NetworkID, nEvent.NodeName)
 	}

 	if nEvent.Type == NetworkEventTypeLeave {
@@ -211,17 +206,22 @@ func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent) bool {
 	nDB.RLock()
 	networks := nDB.networks[nDB.config.NodeName]
 	network, ok := networks[tEvent.NetworkID]
+	// Check if the owner of the event is still part of the network
+	nodes := nDB.networkNodes[tEvent.NetworkID]
+	var nodePresent bool
+	for _, node := range nodes {
+		if node == tEvent.NodeName {
+			nodePresent = true
+			break
+		}
+	}
 	nDB.RUnlock()
-	if !ok || network.leaving {
-		return true
+	if !ok || network.leaving || !nodePresent {
+		// I'm out of the network OR the event owner is no longer part of the network, so do not propagate
+		return false
 	}

 	e, err := nDB.getEntry(tEvent.TableName, tEvent.NetworkID, tEvent.Key)
-	if err != nil && tEvent.Type == TableEventTypeDelete {
-		// If it is a delete event and we don't have the entry here there is nothing to do.
-		return false
-	}
 	if err == nil {
 		// We have the latest state. Ignore the event
 		// since it is stale.
@@ -246,6 +246,11 @@ func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent) bool {
 	nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", tEvent.NetworkID, tEvent.TableName, tEvent.Key), e)
 	nDB.Unlock()

+	if err != nil && tEvent.Type == TableEventTypeDelete {
+		// If it is a delete event and we didn't have the entry here, don't re-notify the application
+		return true
+	}
+
 	var op opType
 	switch tEvent.Type {
 	case TableEventTypeCreate:
@@ -286,8 +291,7 @@ func (nDB *NetworkDB) handleTableMessage(buf []byte, isBulkSync bool) {
 		return
 	}

-	if rebroadcast := nDB.handleTableEvent(&tEvent); rebroadcast {
+	// Do not rebroadcast a bulk sync
+	if rebroadcast := nDB.handleTableEvent(&tEvent); rebroadcast && !isBulkSync {
 		var err error
 		buf, err = encodeRawMessage(MessageTypeTableEvent, buf)
 		if err != nil {
vendor/github.com/docker/libnetwork/networkdb/event_delegate.go (generated, vendored, 8 changes)
@@ -45,9 +45,12 @@ func (e *eventDelegate) NotifyLeave(mn *memberlist.Node) {
 	var failed bool
 	logrus.Infof("Node %s/%s, left gossip cluster", mn.Name, mn.Addr)
 	e.broadcastNodeEvent(mn.Addr, opDelete)
-	e.nDB.deleteNodeTableEntries(mn.Name)
-	e.nDB.deleteNetworkEntriesForNode(mn.Name)
+	// The node left or failed, delete all the entries created by it.
+	// If the node was temporarily down, deleting the entries will guarantee that the CREATE events will be accepted.
+	// If the node instead left because it was going down, then it makes sense to just delete all its state.
 	e.nDB.Lock()
+	e.nDB.deleteNetworkEntriesForNode(mn.Name)
+	e.nDB.deleteNodeTableEntries(mn.Name)
 	if n, ok := e.nDB.nodes[mn.Name]; ok {
 		delete(e.nDB.nodes, mn.Name)
@@ -61,7 +64,6 @@ func (e *eventDelegate) NotifyLeave(mn *memberlist.Node) {
 	if failed {
 		logrus.Infof("Node %s/%s, added to failed nodes list", mn.Name, mn.Addr)
 	}
-
 }

 func (e *eventDelegate) NotifyUpdate(n *memberlist.Node) {
|
143
vendor/github.com/docker/libnetwork/networkdb/networkdb.go
generated
vendored
143
vendor/github.com/docker/libnetwork/networkdb/networkdb.go
generated
vendored
|
@ -108,6 +108,11 @@ type PeerInfo struct {
|
||||||
IP string
|
IP string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PeerClusterInfo represents the peer (gossip cluster) nodes
|
||||||
|
type PeerClusterInfo struct {
|
||||||
|
PeerInfo
|
||||||
|
}
|
||||||
|
|
||||||
type node struct {
|
type node struct {
|
||||||
memberlist.Node
|
memberlist.Node
|
||||||
ltime serf.LamportTime
|
ltime serf.LamportTime
|
||||||
|
@ -253,6 +258,20 @@ func (nDB *NetworkDB) Close() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ClusterPeers returns all the gossip cluster peers.
|
||||||
|
func (nDB *NetworkDB) ClusterPeers() []PeerInfo {
|
||||||
|
nDB.RLock()
|
||||||
|
defer nDB.RUnlock()
|
||||||
|
peers := make([]PeerInfo, 0, len(nDB.nodes))
|
||||||
|
for _, node := range nDB.nodes {
|
||||||
|
peers = append(peers, PeerInfo{
|
||||||
|
Name: node.Name,
|
||||||
|
IP: node.Node.Addr.String(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return peers
|
||||||
|
}
|
||||||
|
|
||||||
// Peers returns the gossip peers for a given network.
|
// Peers returns the gossip peers for a given network.
|
||||||
func (nDB *NetworkDB) Peers(nid string) []PeerInfo {
|
func (nDB *NetworkDB) Peers(nid string) []PeerInfo {
|
||||||
nDB.RLock()
|
nDB.RLock()
|
||||||
|
@ -399,7 +418,6 @@ func (nDB *NetworkDB) DeleteEntry(tname, nid, key string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (nDB *NetworkDB) deleteNetworkEntriesForNode(deletedNode string) {
|
func (nDB *NetworkDB) deleteNetworkEntriesForNode(deletedNode string) {
|
||||||
nDB.Lock()
|
|
||||||
for nid, nodes := range nDB.networkNodes {
|
for nid, nodes := range nDB.networkNodes {
|
||||||
updatedNodes := make([]string, 0, len(nodes))
|
updatedNodes := make([]string, 0, len(nodes))
|
||||||
for _, node := range nodes {
|
for _, node := range nodes {
|
||||||
|
@ -414,11 +432,25 @@ func (nDB *NetworkDB) deleteNetworkEntriesForNode(deletedNode string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
delete(nDB.networks, deletedNode)
|
delete(nDB.networks, deletedNode)
|
||||||
nDB.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// deleteNodeNetworkEntries is called in 2 conditions with 2 different outcomes:
|
||||||
|
// 1) when a notification is coming of a node leaving the network
|
||||||
|
// - Walk all the network entries and mark the leaving node's entries for deletion
|
||||||
|
// These will be garbage collected when the reap timer will expire
|
||||||
|
// 2) when the local node is leaving the network
|
||||||
|
// - Walk all the network entries:
|
||||||
|
// A) if the entry is owned by the local node
|
||||||
|
// then we will mark it for deletion. This will ensure that if a node did not
|
||||||
|
// yet received the notification that the local node is leaving, will be aware
|
||||||
|
// of the entries to be deleted.
|
||||||
|
// B) if the entry is owned by a remote node, then we can safely delete it. This
|
||||||
|
// ensures that if we join back this network as we receive the CREATE event for
|
||||||
|
// entries owned by remote nodes, we will accept them and we notify the application
|
||||||
func (nDB *NetworkDB) deleteNodeNetworkEntries(nid, node string) {
|
func (nDB *NetworkDB) deleteNodeNetworkEntries(nid, node string) {
|
||||||
nDB.Lock()
|
// Indicates if the delete is triggered for the local node
|
||||||
|
isNodeLocal := node == nDB.config.NodeName
|
||||||
|
|
||||||
nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid),
|
nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid),
|
||||||
func(path string, v interface{}) bool {
|
func(path string, v interface{}) bool {
|
||||||
oldEntry := v.(*entry)
|
oldEntry := v.(*entry)
|
||||||
|
@ -427,7 +459,15 @@ func (nDB *NetworkDB) deleteNodeNetworkEntries(nid, node string) {
|
||||||
tname := params[1]
|
tname := params[1]
|
||||||
key := params[2]
|
key := params[2]
|
||||||
|
|
||||||
if oldEntry.node != node {
|
// If the entry is owned by a remote node and this node is not leaving the network
|
||||||
|
if oldEntry.node != node && !isNodeLocal {
|
||||||
|
// Don't do anything because the event is triggered for a node that does not own this entry
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// If this entry is already marked for deletion and this node is not leaving the network
|
||||||
|
if oldEntry.deleting && !isNodeLocal {
|
||||||
|
// Don't do anything this entry will be already garbage collected using the old reapTime
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -439,17 +479,29 @@ func (nDB *NetworkDB) deleteNodeNetworkEntries(nid, node string) {
|
||||||
reapTime: reapInterval,
|
reapTime: reapInterval,
|
||||||
}
|
}
|
||||||
|
|
||||||
nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
|
// we arrived at this point in 2 cases:
|
||||||
nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
|
// 1) this entry is owned by the node that is leaving the network
|
||||||
|
// 2) the local node is leaving the network
|
||||||
|
if oldEntry.node == node {
|
||||||
|
if isNodeLocal {
|
||||||
|
// TODO fcrisciani: this can be removed if there is no way to leave the network
|
||||||
|
// without doing a delete of all the objects
|
||||||
|
entry.ltime++
|
||||||
|
}
|
||||||
|
nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
|
||||||
|
nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
|
||||||
|
} else {
|
||||||
|
// the local node is leaving the network, all the entries of remote nodes can be safely removed
|
||||||
|
nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key))
|
||||||
|
nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key))
|
||||||
|
}
|
||||||
|
|
||||||
nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, entry.value))
|
nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, entry.value))
|
||||||
return false
|
return false
|
||||||
})
|
})
|
||||||
nDB.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (nDB *NetworkDB) deleteNodeTableEntries(node string) {
|
func (nDB *NetworkDB) deleteNodeTableEntries(node string) {
|
||||||
nDB.Lock()
|
|
||||||
nDB.indexes[byTable].Walk(func(path string, v interface{}) bool {
|
nDB.indexes[byTable].Walk(func(path string, v interface{}) bool {
|
||||||
oldEntry := v.(*entry)
|
oldEntry := v.(*entry)
|
||||||
if oldEntry.node != node {
|
if oldEntry.node != node {
|
||||||
|
@ -461,27 +513,18 @@ func (nDB *NetworkDB) deleteNodeTableEntries(node string) {
|
||||||
nid := params[1]
|
nid := params[1]
|
||||||
key := params[2]
|
key := params[2]
|
||||||
|
|
||||||
entry := &entry{
|
nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key))
|
||||||
ltime: oldEntry.ltime,
|
nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key))
|
||||||
node: node,
|
|
||||||
value: oldEntry.value,
|
|
||||||
deleting: true,
|
|
||||||
reapTime: reapInterval,
|
|
||||||
}
|
|
||||||
|
|
||||||
nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
|
nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, oldEntry.value))
|
||||||
nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
|
|
||||||
|
|
||||||
nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, entry.value))
|
|
||||||
return false
|
return false
|
||||||
})
|
})
|
||||||
nDB.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// WalkTable walks a single table in NetworkDB and invokes the passed
|
// WalkTable walks a single table in NetworkDB and invokes the passed
|
||||||
// function for each entry in the table passing the network, key,
|
// function for each entry in the table passing the network, key,
|
||||||
// value. The walk stops if the passed function returns a true.
|
// value. The walk stops if the passed function returns a true.
|
||||||
func (nDB *NetworkDB) WalkTable(tname string, fn func(string, string, []byte) bool) error {
|
func (nDB *NetworkDB) WalkTable(tname string, fn func(string, string, []byte, bool) bool) error {
|
||||||
nDB.RLock()
|
nDB.RLock()
|
||||||
values := make(map[string]interface{})
|
values := make(map[string]interface{})
|
||||||
nDB.indexes[byTable].WalkPrefix(fmt.Sprintf("/%s", tname), func(path string, v interface{}) bool {
|
nDB.indexes[byTable].WalkPrefix(fmt.Sprintf("/%s", tname), func(path string, v interface{}) bool {
|
||||||
|
@ -494,7 +537,7 @@ func (nDB *NetworkDB) WalkTable(tname string, fn func(string, string, []byte) bo
|
||||||
params := strings.Split(k[1:], "/")
|
params := strings.Split(k[1:], "/")
|
||||||
nid := params[1]
|
nid := params[1]
|
||||||
key := params[2]
|
key := params[2]
|
||||||
if fn(nid, key, v.(*entry).value) {
|
if fn(nid, key, v.(*entry).value, v.(*entry).deleting) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -554,37 +597,12 @@ func (nDB *NetworkDB) LeaveNetwork(nid string) error {
|
||||||
|
|
||||||
nDB.Lock()
|
nDB.Lock()
|
||||||
defer nDB.Unlock()
|
defer nDB.Unlock()
|
||||||
var (
|
|
||||||
paths []string
|
|
||||||
entries []*entry
|
|
||||||
)
|
|
||||||
|
|
||||||
|
// Remove myself from the list of the nodes participating to the network
|
||||||
nDB.deleteNetworkNode(nid, nDB.config.NodeName)
|
nDB.deleteNetworkNode(nid, nDB.config.NodeName)
|
||||||
|
|
||||||
nwWalker := func(path string, v interface{}) bool {
|
// Update all the local entries marking them for deletion and delete all the remote entries
|
||||||
entry, ok := v.(*entry)
|
nDB.deleteNodeNetworkEntries(nid, nDB.config.NodeName)
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
paths = append(paths, path)
|
|
||||||
entries = append(entries, entry)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), nwWalker)
|
|
||||||
for _, path := range paths {
|
|
||||||
params := strings.Split(path[1:], "/")
|
|
||||||
tname := params[1]
|
|
||||||
key := params[2]
|
|
||||||
|
|
||||||
if _, ok := nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key)); !ok {
|
|
||||||
logrus.Errorf("Could not delete entry in table %s with network id %s and key %s as it does not exist", tname, nid, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key)); !ok {
|
|
||||||
logrus.Errorf("Could not delete entry in network %s with table name %s and key %s as it does not exist", nid, tname, key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
nodeNetworks, ok := nDB.networks[nDB.config.NodeName]
|
nodeNetworks, ok := nDB.networks[nDB.config.NodeName]
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@ -597,6 +615,7 @@ func (nDB *NetworkDB) LeaveNetwork(nid string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
n.ltime = ltime
|
n.ltime = ltime
|
||||||
|
n.reapTime = reapInterval
|
||||||
n.leaving = true
|
n.leaving = true
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -660,27 +679,3 @@ func (nDB *NetworkDB) updateLocalNetworkTime() {
|
||||||
n.ltime = ltime
|
n.ltime = ltime
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (nDB *NetworkDB) updateLocalTableTime() {
|
|
||||||
nDB.Lock()
|
|
||||||
defer nDB.Unlock()
|
|
||||||
|
|
||||||
ltime := nDB.tableClock.Increment()
|
|
||||||
nDB.indexes[byTable].Walk(func(path string, v interface{}) bool {
|
|
||||||
entry := v.(*entry)
|
|
||||||
if entry.node != nDB.config.NodeName {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
params := strings.Split(path[1:], "/")
|
|
||||||
tname := params[0]
|
|
||||||
nid := params[1]
|
|
||||||
key := params[2]
|
|
||||||
entry.ltime = ltime
|
|
||||||
|
|
||||||
nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
|
|
||||||
nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
|
|
||||||
|
|
||||||
return false
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
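Callers of WalkTable now receive a fourth argument reporting whether the entry is a tombstone still awaiting its reap timer; the agent.go hunk earlier in this diff is the in-tree adaptation. A consumer-side sketch (hypothetical helper and table name):

package main

import (
	"fmt"

	"github.com/docker/libnetwork/networkdb"
)

func dumpTable(nDB *networkdb.NetworkDB, tableName string) error {
	return nDB.WalkTable(tableName, func(nid, key string, value []byte, deleted bool) bool {
		if deleted {
			// Tombstone: kept only so the deletion can still be propagated; skip it.
			return false
		}
		fmt.Printf("network %s: %s = %q\n", nid, key, value)
		return false // returning true stops the walk
	})
}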
242
vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go
generated
vendored
Normal file
242
vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go
generated
vendored
Normal file
|
@ -0,0 +1,242 @@
|
||||||
|
package networkdb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/libnetwork/diagnose"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
missingParameter = "missing parameter"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NetDbPaths2Func TODO
|
||||||
|
var NetDbPaths2Func = map[string]diagnose.HTTPHandlerFunc{
|
||||||
|
"/join": dbJoin,
|
||||||
|
"/networkpeers": dbPeers,
|
||||||
|
"/clusterpeers": dbClusterPeers,
|
||||||
|
"/joinnetwork": dbJoinNetwork,
|
||||||
|
"/leavenetwork": dbLeaveNetwork,
|
||||||
|
"/createentry": dbCreateEntry,
|
||||||
|
"/updateentry": dbUpdateEntry,
|
||||||
|
"/deleteentry": dbDeleteEntry,
|
||||||
|
"/getentry": dbGetEntry,
|
||||||
|
"/gettable": dbGetTable,
|
||||||
|
}
|
||||||
|
|
||||||
|
func dbJoin(ctx interface{}, w http.ResponseWriter, r *http.Request) {
|
||||||
|
r.ParseForm()
|
||||||
|
diagnose.DebugHTTPForm(r)
|
||||||
|
if len(r.Form["members"]) < 1 {
|
||||||
|
diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?members=ip1,ip2,...", r.URL.Path))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
nDB, ok := ctx.(*NetworkDB)
|
||||||
|
if ok {
|
||||||
|
err := nDB.Join(strings.Split(r.Form["members"][0], ","))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(w, "%s error in the DB join %s\n", r.URL.Path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(w, "OK\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dbPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) {
|
||||||
|
r.ParseForm()
|
||||||
|
diagnose.DebugHTTPForm(r)
|
||||||
|
if len(r.Form["nid"]) < 1 {
|
||||||
|
diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?nid=test", r.URL.Path))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
nDB, ok := ctx.(*NetworkDB)
|
||||||
|
if ok {
|
||||||
|
peers := nDB.Peers(r.Form["nid"][0])
|
||||||
|
fmt.Fprintf(w, "Network:%s Total peers: %d\n", r.Form["nid"], len(peers))
|
||||||
|
for i, peerInfo := range peers {
|
||||||
|
fmt.Fprintf(w, "%d) %s -> %s\n", i, peerInfo.Name, peerInfo.IP)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dbClusterPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) {
|
||||||
|
nDB, ok := ctx.(*NetworkDB)
|
||||||
|
if ok {
|
||||||
|
peers := nDB.ClusterPeers()
|
||||||
|
fmt.Fprintf(w, "Total peers: %d\n", len(peers))
|
||||||
|
for i, peerInfo := range peers {
|
||||||
|
fmt.Fprintf(w, "%d) %s -> %s\n", i, peerInfo.Name, peerInfo.IP)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dbCreateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
|
||||||
|
r.ParseForm()
|
||||||
|
diagnose.DebugHTTPForm(r)
|
||||||
|
if len(r.Form["tname"]) < 1 ||
|
||||||
|
len(r.Form["nid"]) < 1 ||
|
||||||
|
len(r.Form["key"]) < 1 ||
|
||||||
|
len(r.Form["value"]) < 1 {
|
||||||
|
diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k&value=v", r.URL.Path))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
tname := r.Form["tname"][0]
|
||||||
|
nid := r.Form["nid"][0]
|
||||||
|
key := r.Form["key"][0]
|
||||||
|
value := r.Form["value"][0]
|
||||||
|
|
||||||
|
nDB, ok := ctx.(*NetworkDB)
|
||||||
|
if ok {
|
||||||
|
if err := nDB.CreateEntry(tname, nid, key, []byte(value)); err != nil {
|
||||||
|
diagnose.HTTPReplyError(w, err.Error(), "")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "OK\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dbUpdateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
|
||||||
|
r.ParseForm()
|
||||||
|
diagnose.DebugHTTPForm(r)
|
||||||
|
if len(r.Form["tname"]) < 1 ||
|
||||||
|
len(r.Form["nid"]) < 1 ||
|
||||||
|
len(r.Form["key"]) < 1 ||
|
||||||
|
len(r.Form["value"]) < 1 {
|
||||||
|
diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k&value=v", r.URL.Path))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
tname := r.Form["tname"][0]
|
||||||
|
nid := r.Form["nid"][0]
|
||||||
|
key := r.Form["key"][0]
|
||||||
|
value := r.Form["value"][0]
|
||||||
|
|
||||||
|
nDB, ok := ctx.(*NetworkDB)
|
||||||
|
if ok {
|
||||||
|
if err := nDB.UpdateEntry(tname, nid, key, []byte(value)); err != nil {
|
||||||
|
diagnose.HTTPReplyError(w, err.Error(), "")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "OK\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dbDeleteEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
|
||||||
|
r.ParseForm()
|
||||||
|
+	diagnose.DebugHTTPForm(r)
+	if len(r.Form["tname"]) < 1 ||
+		len(r.Form["nid"]) < 1 ||
+		len(r.Form["key"]) < 1 {
+		diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k", r.URL.Path))
+		return
+	}
+
+	tname := r.Form["tname"][0]
+	nid := r.Form["nid"][0]
+	key := r.Form["key"][0]
+
+	nDB, ok := ctx.(*NetworkDB)
+	if ok {
+		err := nDB.DeleteEntry(tname, nid, key)
+		if err != nil {
+			diagnose.HTTPReplyError(w, err.Error(), "")
+			return
+		}
+		fmt.Fprintf(w, "OK\n")
+	}
+}
+
+func dbGetEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+	r.ParseForm()
+	diagnose.DebugHTTPForm(r)
+	if len(r.Form["tname"]) < 1 ||
+		len(r.Form["nid"]) < 1 ||
+		len(r.Form["key"]) < 1 {
+		diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k", r.URL.Path))
+		return
+	}
+
+	tname := r.Form["tname"][0]
+	nid := r.Form["nid"][0]
+	key := r.Form["key"][0]
+
+	nDB, ok := ctx.(*NetworkDB)
+	if ok {
+		value, err := nDB.GetEntry(tname, nid, key)
+		if err != nil {
+			diagnose.HTTPReplyError(w, err.Error(), "")
+			return
+		}
+		fmt.Fprintf(w, "key:`%s` value:`%s`\n", key, string(value))
+	}
+}
+
+func dbJoinNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+	r.ParseForm()
+	diagnose.DebugHTTPForm(r)
+	if len(r.Form["nid"]) < 1 {
+		diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?nid=network_id", r.URL.Path))
+		return
+	}
+
+	nid := r.Form["nid"][0]
+
+	nDB, ok := ctx.(*NetworkDB)
+	if ok {
+		if err := nDB.JoinNetwork(nid); err != nil {
+			diagnose.HTTPReplyError(w, err.Error(), "")
+			return
+		}
+		fmt.Fprintf(w, "OK\n")
+	}
+}
+
+func dbLeaveNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+	r.ParseForm()
+	diagnose.DebugHTTPForm(r)
+	if len(r.Form["nid"]) < 1 {
+		diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?nid=network_id", r.URL.Path))
+		return
+	}
+
+	nid := r.Form["nid"][0]
+
+	nDB, ok := ctx.(*NetworkDB)
+	if ok {
+		if err := nDB.LeaveNetwork(nid); err != nil {
+			diagnose.HTTPReplyError(w, err.Error(), "")
+			return
+		}
+		fmt.Fprintf(w, "OK\n")
+	}
+}
+
+func dbGetTable(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+	r.ParseForm()
+	diagnose.DebugHTTPForm(r)
+	if len(r.Form["tname"]) < 1 ||
+		len(r.Form["nid"]) < 1 {
+		diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id", r.URL.Path))
+		return
+	}
+
+	tname := r.Form["tname"][0]
+	nid := r.Form["nid"][0]
+
+	nDB, ok := ctx.(*NetworkDB)
+	if ok {
+		table := nDB.GetTableByNetwork(tname, nid)
+		fmt.Fprintf(w, "total elements: %d\n", len(table))
+		i := 0
+		for k, v := range table {
+			fmt.Fprintf(w, "%d) k:`%s` -> v:`%s`\n", i, k, string(v.([]byte)))
+			i++
+		}
+	}
+}
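The diagnostic handlers above read all their arguments from the query string, so once the diagnostic server is running they can be exercised with plain HTTP requests. A minimal client sketch in Go; the mount paths (/joinnetwork, /gettable) and the listen address are assumptions for illustration, not taken from this diff:

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"net/http"
    )

    // get issues one diagnostic request and prints the plain-text reply.
    func get(url string) {
    	resp, err := http.Get(url)
    	if err != nil {
    		fmt.Println("request failed:", err)
    		return
    	}
    	defer resp.Body.Close()
    	body, _ := ioutil.ReadAll(resp.Body)
    	fmt.Printf("%s -> %s", url, body)
    }

    func main() {
    	base := "http://127.0.0.1:2000" // hypothetical diagnostic server address
    	get(base + "/joinnetwork?nid=net1")                   // dbJoinNetwork
    	get(base + "/gettable?tname=endpoint_table&nid=net1") // dbGetTable
    }

On success each handler replies with OK (or a key/value dump for dbGetEntry and dbGetTable); missing parameters produce the usage string passed to diagnose.HTTPReplyError.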
19
vendor/github.com/docker/libnetwork/osl/interface_linux.go
generated
vendored

@@ -26,7 +26,6 @@ type nwIface struct {
 	mac         net.HardwareAddr
 	address     *net.IPNet
 	addressIPv6 *net.IPNet
-	ipAliases   []*net.IPNet
 	llAddrs     []*net.IPNet
 	routes      []*net.IPNet
 	bridge      bool
@@ -97,13 +96,6 @@ func (i *nwIface) LinkLocalAddresses() []*net.IPNet {
 	return i.llAddrs
 }
 
-func (i *nwIface) IPAliases() []*net.IPNet {
-	i.Lock()
-	defer i.Unlock()
-
-	return i.ipAliases
-}
-
 func (i *nwIface) Routes() []*net.IPNet {
 	i.Lock()
 	defer i.Unlock()
@@ -337,7 +329,6 @@ func configureInterface(nlh *netlink.Handle, iface netlink.Link, i *nwIface) err
 		{setInterfaceIPv6, fmt.Sprintf("error setting interface %q IPv6 to %v", ifaceName, i.AddressIPv6())},
 		{setInterfaceMaster, fmt.Sprintf("error setting interface %q master to %q", ifaceName, i.DstMaster())},
 		{setInterfaceLinkLocalIPs, fmt.Sprintf("error setting interface %q link local IPs to %v", ifaceName, i.LinkLocalAddresses())},
-		{setInterfaceIPAliases, fmt.Sprintf("error setting interface %q IP Aliases to %v", ifaceName, i.IPAliases())},
 	}
 
 	for _, config := range ifaceConfigurators {
@@ -399,16 +390,6 @@ func setInterfaceLinkLocalIPs(nlh *netlink.Handle, iface netlink.Link, i *nwIfac
 	return nil
 }
 
-func setInterfaceIPAliases(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error {
-	for _, si := range i.IPAliases() {
-		ipAddr := &netlink.Addr{IPNet: si}
-		if err := nlh.AddrAdd(iface, ipAddr); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 func setInterfaceName(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error {
 	return nlh.LinkSetName(iface, i.DstName())
 }
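configureInterface (touched in the hunk above) keeps its table-driven shape: each setup step is a function paired with a pre-rendered error message, and the steps run in order until one fails. A freestanding sketch of that pattern, with all names invented for illustration:

    package main

    import (
    	"errors"
    	"fmt"
    )

    func main() {
    	// Mirrors the ifaceConfigurators table: an ordered list of steps,
    	// each carrying the message to report if it fails.
    	steps := []struct {
    		fn     func() error
    		errMsg string
    	}{
    		{func() error { return nil }, "error setting interface name"},
    		{func() error { return errors.New("address already assigned") }, "error setting interface IP"},
    	}
    	for _, s := range steps {
    		if err := s.fn(); err != nil {
    			fmt.Printf("%s: %v\n", s.errMsg, err)
    			return
    		}
    	}
    	fmt.Println("interface configured")
    }

Dropping setInterfaceIPAliases from that table is the whole removal on the configuration side; the loopback-alias replacement appears in namespace_linux.go below.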
16
vendor/github.com/docker/libnetwork/osl/namespace_linux.go
generated
vendored

@@ -356,6 +356,22 @@ func (n *networkNamespace) loopbackUp() error {
 	return n.nlHandle.LinkSetUp(iface)
 }
 
+func (n *networkNamespace) AddLoopbackAliasIP(ip *net.IPNet) error {
+	iface, err := n.nlHandle.LinkByName("lo")
+	if err != nil {
+		return err
+	}
+	return n.nlHandle.AddrAdd(iface, &netlink.Addr{IPNet: ip})
+}
+
+func (n *networkNamespace) RemoveLoopbackAliasIP(ip *net.IPNet) error {
+	iface, err := n.nlHandle.LinkByName("lo")
+	if err != nil {
+		return err
+	}
+	return n.nlHandle.AddrDel(iface, &netlink.Addr{IPNet: ip})
+}
+
 func (n *networkNamespace) InvokeFunc(f func()) error {
 	return nsInvoke(n.nsPath(), func(nsFD int) error { return nil }, func(callerFD int) error {
 		f()
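The two methods added here are thin wrappers over the namespace's netlink handle: resolve the lo link, then add or delete the address on it. The same calls can be tried directly against the host namespace with github.com/vishvananda/netlink; a sketch (needs CAP_NET_ADMIN, and the address is an arbitrary example):

    package main

    import (
    	"log"
    	"net"

    	"github.com/vishvananda/netlink"
    )

    func main() {
    	lo, err := netlink.LinkByName("lo")
    	if err != nil {
    		log.Fatal(err)
    	}
    	// A /32 alias, the same shape libnetwork uses to pin a service VIP
    	// to the sandbox loopback.
    	alias := &netlink.Addr{IPNet: &net.IPNet{
    		IP:   net.ParseIP("10.255.0.5"),
    		Mask: net.CIDRMask(32, 32),
    	}}
    	if err := netlink.AddrAdd(lo, alias); err != nil { // cf. AddLoopbackAliasIP
    		log.Fatal(err)
    	}
    	if err := netlink.AddrDel(lo, alias); err != nil { // cf. RemoveLoopbackAliasIP
    		log.Fatal(err)
    	}
    }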
4
vendor/github.com/docker/libnetwork/osl/neigh_linux.go
generated
vendored

@@ -91,9 +91,7 @@ func (n *networkNamespace) DeleteNeighbor(dstIP net.IP, dstMac net.HardwareAddr,
 		if nh.linkDst != "" {
 			nlnh.LinkIndex = iface.Attrs().Index
 		}
-		if err := nlh.NeighDel(nlnh); err != nil {
-			logrus.Warnf("Deleting bridge mac mac %s failed, %v", dstMac, err)
-		}
+		nlh.NeighDel(nlnh)
 	}
 }
 
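DeleteNeighbor now deliberately discards the NeighDel result, so removing an entry the kernel has already forgotten is no longer logged as a failure; the deletion becomes best effort and effectively idempotent. As a standalone pattern (helper name invented):

    // deleteQuietly wraps a delete-style call whose "already deleted"
    // outcome is acceptable, so its error is deliberately dropped.
    func deleteQuietly(del func() error) {
    	_ = del()
    }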
6
vendor/github.com/docker/libnetwork/osl/options_linux.go
generated
vendored

@@ -66,12 +66,6 @@ func (n *networkNamespace) LinkLocalAddresses(list []*net.IPNet) IfaceOption {
 	}
 }
 
-func (n *networkNamespace) IPAliases(list []*net.IPNet) IfaceOption {
-	return func(i *nwIface) {
-		i.ipAliases = list
-	}
-}
-
 func (n *networkNamespace) Routes(routes []*net.IPNet) IfaceOption {
 	return func(i *nwIface) {
 		i.routes = routes
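Routes, like the IPAliases setter being removed, follows the functional-options pattern: the method returns an IfaceOption closure that is applied later, when the nwIface is actually assembled. A self-contained sketch of that pattern with hypothetical types:

    package main

    import "fmt"

    type iface struct {
    	name   string
    	routes []string
    }

    // ifaceOption mirrors osl's IfaceOption: a deferred mutation of the
    // interface under construction.
    type ifaceOption func(*iface)

    func withRoutes(routes []string) ifaceOption {
    	return func(i *iface) { i.routes = routes }
    }

    func newIface(name string, opts ...ifaceOption) *iface {
    	i := &iface{name: name}
    	for _, opt := range opts {
    		opt(i)
    	}
    	return i
    }

    func main() {
    	i := newIface("eth0", withRoutes([]string{"10.0.0.0/24"}))
    	fmt.Printf("%+v\n", *i)
    }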
12
vendor/github.com/docker/libnetwork/osl/sandbox.go
generated
vendored

@@ -32,6 +32,12 @@ type Sandbox interface {
 	// Unset the previously set default IPv6 gateway in the sandbox
 	UnsetGatewayIPv6() error
 
+	// AddLoopbackAliasIP adds the passed IP address to the sandbox loopback interface
+	AddLoopbackAliasIP(ip *net.IPNet) error
+
+	// RemoveLoopbackAliasIP removes the passed IP address from the sandbox loopback interface
+	RemoveLoopbackAliasIP(ip *net.IPNet) error
+
 	// Add a static route to the sandbox.
 	AddStaticRoute(*types.StaticRoute) error
 
@@ -91,9 +97,6 @@ type IfaceOptionSetter interface {
 	// LinkLocalAddresses returns an option setter to set the link-local IP addresses.
 	LinkLocalAddresses([]*net.IPNet) IfaceOption
 
-	// IPAliases returns an option setter to set IP address Aliases
-	IPAliases([]*net.IPNet) IfaceOption
-
 	// Master returns an option setter to set the master interface if any for this
 	// interface. The master interface name should refer to the srcname of a
 	// previously added interface of type bridge.
@@ -150,9 +153,6 @@ type Interface interface {
 	// LinkLocalAddresses returns the link-local IP addresses assigned to the interface.
 	LinkLocalAddresses() []*net.IPNet
 
-	// IPAliases returns the IP address aliases assigned to the interface.
-	IPAliases() []*net.IPNet
-
 	// IP routes for the interface.
 	Routes() []*net.IPNet
 
32
vendor/github.com/docker/libnetwork/resolver.go
generated
vendored

@@ -231,7 +231,7 @@ func (r *resolver) handleIPQuery(name string, query *dns.Msg, ipType int) (*dns.
 
 	if addr == nil && ipv6Miss {
 		// Send a reply without any Answer sections
-		logrus.Debugf("Lookup name %s present without IPv6 address", name)
+		logrus.Debugf("[resolver] lookup name %s present without IPv6 address", name)
 		resp := createRespMsg(query)
 		return resp, nil
 	}
@@ -239,7 +239,7 @@ func (r *resolver) handleIPQuery(name string, query *dns.Msg, ipType int) (*dns.
 		return nil, nil
 	}
 
-	logrus.Debugf("Lookup for %s: IP %v", name, addr)
+	logrus.Debugf("[resolver] lookup for %s: IP %v", name, addr)
 
 	resp := createRespMsg(query)
 	if len(addr) > 1 {
@@ -280,7 +280,7 @@ func (r *resolver) handlePTRQuery(ptr string, query *dns.Msg) (*dns.Msg, error)
 		return nil, nil
 	}
 
-	logrus.Debugf("Lookup for IP %s: name %s", parts[0], host)
+	logrus.Debugf("[resolver] lookup for IP %s: name %s", parts[0], host)
 	fqdn := dns.Fqdn(host)
 
 	resp := new(dns.Msg)
@@ -431,10 +431,12 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
 			}
 		}
 		if err != nil {
-			logrus.Warnf("Connect failed: %s", err)
+			logrus.Warnf("[resolver] connect failed: %s", err)
 			continue
 		}
-		logrus.Debugf("Query %s[%d] from %s, forwarding to %s:%s", name, query.Question[0].Qtype,
+
+		queryType := dns.TypeToString[query.Question[0].Qtype]
+		logrus.Debugf("[resolver] query %s (%s) from %s, forwarding to %s:%s", name, queryType,
 			extConn.LocalAddr().String(), proto, extDNS.IPStr)
 
 		// Timeout has to be set for every IO operation.
@@ -450,7 +452,7 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
 			old := r.tStamp
 			r.tStamp = time.Now()
 			if r.tStamp.Sub(old) > logInterval {
-				logrus.Errorf("More than %v concurrent queries from %s", maxConcurrent, extConn.LocalAddr().String())
+				logrus.Errorf("[resolver] more than %v concurrent queries from %s", maxConcurrent, extConn.LocalAddr().String())
 			}
 			continue
 		}
@@ -458,7 +460,7 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
 		err = co.WriteMsg(query)
 		if err != nil {
 			r.forwardQueryEnd()
-			logrus.Debugf("Send to DNS server failed, %s", err)
+			logrus.Debugf("[resolver] send to DNS server failed, %s", err)
 			continue
 		}
 
@@ -467,22 +469,32 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
 		// client can retry over TCP
 		if err != nil && err != dns.ErrTruncated {
 			r.forwardQueryEnd()
-			logrus.Debugf("Read from DNS server failed, %s", err)
+			logrus.Debugf("[resolver] read from DNS server failed, %s", err)
 			continue
 		}
 		r.forwardQueryEnd()
 		if resp != nil {
+			answers := 0
 			for _, rr := range resp.Answer {
 				h := rr.Header()
 				switch h.Rrtype {
 				case dns.TypeA:
+					answers++
 					ip := rr.(*dns.A).A
+					logrus.Debugf("[resolver] received A record %q for %q from %s:%s", ip, h.Name, proto, extDNS.IPStr)
 					r.backend.HandleQueryResp(h.Name, ip)
 				case dns.TypeAAAA:
+					answers++
 					ip := rr.(*dns.AAAA).AAAA
+					logrus.Debugf("[resolver] received AAAA record %q for %q from %s:%s", ip, h.Name, proto, extDNS.IPStr)
 					r.backend.HandleQueryResp(h.Name, ip)
 				}
 			}
+			if resp.Answer == nil || answers == 0 {
+				logrus.Debugf("[resolver] external DNS %s:%s did not return any %s records for %q", proto, extDNS.IPStr, queryType, name)
+			}
+		} else {
+			logrus.Debugf("[resolver] external DNS %s:%s returned empty response for %q", proto, extDNS.IPStr, name)
 		}
 		resp.Compress = true
 		break
@@ -493,7 +505,7 @@ func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) {
 	}
 
 	if err = w.WriteMsg(resp); err != nil {
-		logrus.Errorf("error writing resolver resp, %s", err)
+		logrus.Errorf("[resolver] error writing resolver resp, %s", err)
 	}
 }
 
@@ -514,7 +526,7 @@ func (r *resolver) forwardQueryEnd() {
 	defer r.queryLock.Unlock()
 
 	if r.count == 0 {
-		logrus.Error("Invalid concurrent query count")
+		logrus.Error("[resolver] invalid concurrent query count")
 	} else {
 		r.count--
 	}
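The new answers counter lets ServeDNS distinguish three cases in its logs: no response at all (resp == nil), a response carrying A/AAAA records, and a response whose Answer section has none. The counting logic extracted into a helper, using the same github.com/miekg/dns types as the hunk (the sample record is made up):

    package main

    import (
    	"fmt"
    	"net"

    	"github.com/miekg/dns"
    )

    // countAddressAnswers counts the A and AAAA records in a reply, the
    // same records the resolver hands to its backend via HandleQueryResp.
    func countAddressAnswers(resp *dns.Msg) int {
    	n := 0
    	for _, rr := range resp.Answer {
    		switch rr.Header().Rrtype {
    		case dns.TypeA, dns.TypeAAAA:
    			n++
    		}
    	}
    	return n
    }

    func main() {
    	resp := new(dns.Msg)
    	resp.Answer = append(resp.Answer, &dns.A{
    		Hdr: dns.RR_Header{Name: "web.", Rrtype: dns.TypeA, Class: dns.ClassINET},
    		A:   net.ParseIP("10.0.0.2"),
    	})
    	fmt.Println(countAddressAnswers(resp)) // 1
    }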
22
vendor/github.com/docker/libnetwork/sandbox.go
generated
vendored

@@ -709,8 +709,15 @@ func releaseOSSboxResources(osSbox osl.Sandbox, ep *endpoint) {
 
 	ep.Lock()
 	joinInfo := ep.joinInfo
+	vip := ep.virtualIP
 	ep.Unlock()
 
+	if len(vip) != 0 {
+		if err := osSbox.RemoveLoopbackAliasIP(&net.IPNet{IP: vip, Mask: net.CIDRMask(32, 32)}); err != nil {
+			logrus.Warnf("Remove virtual IP %v failed: %v", vip, err)
+		}
+	}
+
 	if joinInfo == nil {
 		return
 	}
@@ -767,10 +774,6 @@ func (sb *sandbox) restoreOslSandbox() error {
 		if len(i.llAddrs) != 0 {
 			ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().LinkLocalAddresses(i.llAddrs))
 		}
-		if len(ep.virtualIP) != 0 {
-			vipAlias := &net.IPNet{IP: ep.virtualIP, Mask: net.CIDRMask(32, 32)}
-			ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().IPAliases([]*net.IPNet{vipAlias}))
-		}
 		Ifaces[fmt.Sprintf("%s+%s", i.srcName, i.dstPrefix)] = ifaceOptions
 		if joinInfo != nil {
 			routes = append(routes, joinInfo.StaticRoutes...)
@@ -818,10 +821,6 @@ func (sb *sandbox) populateNetworkResources(ep *endpoint) error {
 	if len(i.llAddrs) != 0 {
 		ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().LinkLocalAddresses(i.llAddrs))
 	}
-	if len(ep.virtualIP) != 0 {
-		vipAlias := &net.IPNet{IP: ep.virtualIP, Mask: net.CIDRMask(32, 32)}
-		ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().IPAliases([]*net.IPNet{vipAlias}))
-	}
 	if i.mac != nil {
 		ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().MacAddress(i.mac))
 	}
@@ -831,6 +830,13 @@
 		}
 	}
 
+	if len(ep.virtualIP) != 0 {
+		err := sb.osSbox.AddLoopbackAliasIP(&net.IPNet{IP: ep.virtualIP, Mask: net.CIDRMask(32, 32)})
+		if err != nil {
+			return fmt.Errorf("failed to add virtual IP %v: %v", ep.virtualIP, err)
+		}
+	}
+
 	if joinInfo != nil {
 		// Set up non-interface routes.
 		for _, r := range joinInfo.StaticRoutes {
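Both the add path above and the remove path in releaseOSSboxResources build the alias as a /32, so the VIP sits on loopback as a single host address with no implied on-link subnet. A small illustration of what net.CIDRMask(32, 32) produces:

    package main

    import (
    	"fmt"
    	"net"
    )

    func main() {
    	vip := net.ParseIP("10.0.0.7")
    	// CIDRMask(ones, bits): a 32-bit mask with all 32 bits set, i.e. /32,
    	// so the alias covers exactly the VIP and nothing else.
    	alias := &net.IPNet{IP: vip, Mask: net.CIDRMask(32, 32)}
    	fmt.Println(alias.String()) // 10.0.0.7/32
    }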
14
vendor/github.com/docker/libnetwork/vendor.conf
generated
vendored

@@ -1,8 +1,7 @@
-github.com/Azure/go-ansiterm 04b7f292a41fcb5da32dda536c0807fc13e8351c
+github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
 github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
 github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
-github.com/Microsoft/hcsshim e439b7d2b63f036d3a50c93a9e0b154a0d50e788
-github.com/Sirupsen/logrus 4b6ea7319e214d98c938f12692336f7ca9348d6b
+github.com/Microsoft/hcsshim v0.6.1
 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
 github.com/boltdb/bolt c6ba97b89e0454fec9aa92e1d33a4e2c5fc1f631
@@ -11,9 +10,9 @@ github.com/coreos/etcd 925d1d74cec8c3b169c52fd4b2dc234a35934fce
 github.com/coreos/go-systemd b4a58d95188dd092ae20072bac14cece0e67c388
 github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
 
-github.com/docker/docker 9c96768eae4b3a65147b47a55c850c103ab8972d
-github.com/docker/go-connections 34b5052da6b11e27f5f2e357b38b571ddddd3928
-github.com/docker/go-events 2e7d352816128aa84f4d29b2a21d400133701a0d
+github.com/docker/docker 2cac43e3573893cf8fd816e0ad5615426acb87f4 https://github.com/dmcgowan/docker.git
+github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
+github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
 github.com/docker/go-units 8e2d4523730c73120e10d4652f36ad6010998f4e
 github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
 
@@ -31,9 +30,10 @@ github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d
 github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
 github.com/mattn/go-shellwords 525bedee691b5a8df547cb5cf9f86b7fb1883e24
 github.com/miekg/dns d27455715200c7d3e321a1e5cadb27c9ee0b0f02
-github.com/opencontainers/runc ba1568de399395774ad84c2ace65937814c542ed
+github.com/opencontainers/runc 8694d576ea3ce3c9e2c804b7f91b4e1e9a575d1c https://github.com/dmcgowan/runc.git
 github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
 github.com/seccomp/libseccomp-golang 1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1
+github.com/sirupsen/logrus v1.0.1
 github.com/stretchr/testify dab07ac62d4905d3e48d17dc549c684ac3b7c15a
 github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
 github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065