
Merge pull request #1149 from mrjana/agent

Add libnetwork agent mode support
Santhosh Manohar committed 2016-05-05 14:33:35 -07:00 · commit 63cc2ecf78
17 changed files with 719 additions and 53 deletions

libnetwork/agent.go (new file, 343 lines)

@ -0,0 +1,343 @@
package libnetwork
import (
"fmt"
"net"
"os"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/go-events"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/discoverapi"
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/networkdb"
)
type agent struct {
networkDB *networkdb.NetworkDB
bindAddr string
epTblCancel func()
driverCancelFuncs map[string][]func()
}
func getBindAddr(ifaceName string) (string, error) {
iface, err := net.InterfaceByName(ifaceName)
if err != nil {
return "", fmt.Errorf("failed to find interface %s: %v", ifaceName, err)
}
addrs, err := iface.Addrs()
if err != nil {
return "", fmt.Errorf("failed to get interface addresses: %v", err)
}
for _, a := range addrs {
addr, ok := a.(*net.IPNet)
if !ok {
continue
}
addrIP := addr.IP
if addrIP.IsLinkLocalUnicast() {
continue
}
return addrIP.String(), nil
}
return "", fmt.Errorf("failed to get bind address")
}
func resolveAddr(addrOrInterface string) (string, error) {
// Try and see if this is a valid IP address
if net.ParseIP(addrOrInterface) != nil {
return addrOrInterface, nil
}
// If not a valid IP address, it should be a valid interface
return getBindAddr(addrOrInterface)
}
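resolveAddr accepts either a literal IP address or an interface name, and getBindAddr picks the first non-link-local unicast address on that interface. A minimal sketch of how this helper might be exercised; the interface name "eth0" and the literal address are placeholder values:
// Illustrative only: resolving a bind address from either input form.
func exampleResolveBindAddr() {
	for _, in := range []string{"10.0.0.1", "eth0"} {
		addr, err := resolveAddr(in)
		if err != nil {
			logrus.Errorf("could not resolve bind address from %q: %v", in, err)
			continue
		}
		logrus.Infof("resolved bind address %s from %q", addr, in)
	}
}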
func (c *controller) agentInit(bindAddrOrInterface string) error {
if !c.cfg.Daemon.IsAgent {
return nil
}
bindAddr, err := resolveAddr(bindAddrOrInterface)
if err != nil {
return err
}
hostname, _ := os.Hostname()
nDB, err := networkdb.New(&networkdb.Config{
BindAddr: bindAddr,
NodeName: hostname,
})
if err != nil {
return err
}
ch, cancel := nDB.Watch("endpoint_table", "", "")
c.agent = &agent{
networkDB: nDB,
bindAddr: bindAddr,
epTblCancel: cancel,
driverCancelFuncs: make(map[string][]func()),
}
go c.handleTableEvents(ch, c.handleEpTableEvent)
return nil
}
func (c *controller) agentJoin(remotes []string) error {
if c.agent == nil {
return nil
}
return c.agent.networkDB.Join(remotes)
}
func (c *controller) agentDriverNotify(d driverapi.Driver) {
if c.agent == nil {
return
}
d.DiscoverNew(discoverapi.NodeDiscovery, discoverapi.NodeDiscoveryData{
Address: c.agent.bindAddr,
Self: true,
})
}
func (c *controller) agentClose() {
if c.agent == nil {
return
}
for _, cancelFuncs := range c.agent.driverCancelFuncs {
for _, cancel := range cancelFuncs {
cancel()
}
}
c.agent.epTblCancel()
c.agent.networkDB.Close()
}
func (n *network) isClusterEligible() bool {
if n.driverScope() != datastore.GlobalScope {
return false
}
c := n.getController()
if c.agent == nil {
return false
}
return true
}
func (n *network) joinCluster() error {
if !n.isClusterEligible() {
return nil
}
c := n.getController()
return c.agent.networkDB.JoinNetwork(n.ID())
}
func (n *network) leaveCluster() error {
if !n.isClusterEligible() {
return nil
}
c := n.getController()
return c.agent.networkDB.LeaveNetwork(n.ID())
}
func (ep *endpoint) addToCluster() error {
n := ep.getNetwork()
if !n.isClusterEligible() {
return nil
}
c := n.getController()
if !ep.isAnonymous() && ep.Iface().Address() != nil {
if err := c.agent.networkDB.CreateEntry("endpoint_table", n.ID(), ep.ID(), []byte(fmt.Sprintf("%s=%s", ep.Name(), ep.Iface().Address().IP))); err != nil {
return err
}
}
for _, te := range ep.joinInfo.driverTableEntries {
if err := c.agent.networkDB.CreateEntry(te.tableName, n.ID(), te.key, te.value); err != nil {
return err
}
}
return nil
}
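The endpoint_table value written above is a plain "name=IP" string; handleEpTableEvent at the bottom of this file splits it back apart. A small sketch of that round trip, with made-up values:
// Hedged example of the endpoint_table value format ("web" and the IP are invented).
value := fmt.Sprintf("%s=%s", "web", net.ParseIP("10.0.1.2"))
pair := strings.Split(value, "=")   // ["web", "10.0.1.2"]
name, ip := pair[0], net.ParseIP(pair[1])
fmt.Println(name, ip)               // web 10.0.1.2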
func (ep *endpoint) deleteFromCluster() error {
n := ep.getNetwork()
if !n.isClusterEligible() {
return nil
}
c := n.getController()
if !ep.isAnonymous() {
if err := c.agent.networkDB.DeleteEntry("endpoint_table", n.ID(), ep.ID()); err != nil {
return err
}
}
if ep.joinInfo == nil {
return nil
}
for _, te := range ep.joinInfo.driverTableEntries {
if err := c.agent.networkDB.DeleteEntry(te.tableName, n.ID(), te.key); err != nil {
return err
}
}
return nil
}
func (n *network) addDriverWatches() {
if !n.isClusterEligible() {
return
}
c := n.getController()
for _, tableName := range n.driverTables {
ch, cancel := c.agent.networkDB.Watch(tableName, n.ID(), "")
c.Lock()
c.agent.driverCancelFuncs[n.ID()] = append(c.agent.driverCancelFuncs[n.ID()], cancel)
c.Unlock()
go c.handleTableEvents(ch, n.handleDriverTableEvent)
d, err := n.driver(false)
if err != nil {
logrus.Errorf("Could not resolve driver %s while walking driver tabl: %v", n.networkType, err)
return
}
c.agent.networkDB.WalkTable(tableName, func(nid, key string, value []byte) bool {
d.EventNotify(driverapi.Create, n.ID(), tableName, key, value)
return false
})
}
}
func (n *network) cancelDriverWatches() {
if !n.isClusterEligible() {
return
}
c := n.getController()
c.Lock()
cancelFuncs := c.agent.driverCancelFuncs[n.ID()]
delete(c.agent.driverCancelFuncs, n.ID())
c.Unlock()
for _, cancel := range cancelFuncs {
cancel()
}
}
func (c *controller) handleTableEvents(ch chan events.Event, fn func(events.Event)) {
for {
select {
case ev, ok := <-ch:
if !ok {
return
}
fn(ev)
}
}
}
func (n *network) handleDriverTableEvent(ev events.Event) {
d, err := n.driver(false)
if err != nil {
logrus.Errorf("Could not resolve driver %s while handling driver table event: %v", n.networkType, err)
return
}
var (
etype driverapi.EventType
tname string
key string
value []byte
)
switch event := ev.(type) {
case networkdb.CreateEvent:
tname = event.Table
key = event.Key
value = event.Value
etype = driverapi.Create
case networkdb.DeleteEvent:
tname = event.Table
key = event.Key
value = event.Value
etype = driverapi.Delete
case networkdb.UpdateEvent:
tname = event.Table
key = event.Key
value = event.Value
etype = driverapi.Delete
}
d.EventNotify(etype, n.ID(), tname, key, value)
}
func (c *controller) handleEpTableEvent(ev events.Event) {
var (
id string
value string
isAdd bool
)
switch event := ev.(type) {
case networkdb.CreateEvent:
id = event.NetworkID
value = string(event.Value)
isAdd = true
case networkdb.DeleteEvent:
id = event.NetworkID
value = string(event.Value)
case networkdb.UpdateEvent:
logrus.Errorf("Unexpected update service table event = %#v", event)
}
nw, err := c.NetworkByID(id)
if err != nil {
logrus.Errorf("Could not find network %s while handling service table event: %v", id, err)
return
}
n := nw.(*network)
pair := strings.Split(value, "=")
if len(pair) < 2 {
logrus.Errorf("Incorrect service table value = %s", value)
return
}
name := pair[0]
ip := net.ParseIP(pair[1])
if name == "" || ip == nil {
logrus.Errorf("Invalid endpoint name/ip received while handling service table event %s", value)
return
}
if isAdd {
n.addSvcRecords(name, ip, nil, true)
} else {
n.deleteSvcRecords(name, ip, nil, true)
}
}

View file

@ -307,7 +307,17 @@ func procCreateNetwork(c libnetwork.NetworkController, vars map[string]string, b
if len(create.DriverOpts) > 0 {
options = append(options, libnetwork.NetworkOptionDriverOpts(create.DriverOpts))
}
nw, err := c.NewNetwork(create.NetworkType, create.Name, "", options...)
if len(create.IPv4Conf) > 0 {
ipamV4Conf := &libnetwork.IpamConf{
PreferredPool: create.IPv4Conf[0].PreferredPool,
SubPool: create.IPv4Conf[0].SubPool,
}
options = append(options, libnetwork.NetworkOptionIpam("default", "", []*libnetwork.IpamConf{ipamV4Conf}, nil, nil))
}
nw, err := c.NewNetwork(create.NetworkType, create.Name, create.ID, options...)
if err != nil {
return nil, convertNetworkError(err)
}
@ -697,6 +707,7 @@ func procAttachBackend(c libnetwork.NetworkController, vars map[string]string, b
if err != nil {
return nil, convertNetworkError(err)
}
return sb.Key(), &successResponse
}

View file

@ -32,10 +32,19 @@ type sandboxResource struct {
/***********
Body types
************/
type ipamConf struct {
PreferredPool string
SubPool string
Gateway string
AuxAddresses map[string]string
}
// networkCreate is the expected body of the "create network" http request message
type networkCreate struct {
Name string `json:"name"`
ID string `json:"id"`
NetworkType string `json:"network_type"`
IPv4Conf []ipamConf `json:"ipv4_configuration"`
DriverOpts map[string]string `json:"driver_opts"`
NetworkOpts map[string]string `json:"network_opts"`
}
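For reference, a sketch of a request body matching these tags; all values are placeholders borrowed from the overlay-local test further below, and note that the ipamConf fields carry no json tags, so they marshal under their Go field names:
// Illustrative create-network body for POST /networks.
body := networkCreate{
	Name:        "multihost",
	ID:          "mhid",
	NetworkType: "overlay",
	IPv4Conf:    []ipamConf{{PreferredPool: "10.1.0.0/16", SubPool: "10.1.1.0/24"}},
	DriverOpts:  map[string]string{"com.docker.network.driver.overlay.vxlanid_list": "1024"},
}
b, _ := json.Marshal(body) // requires "encoding/json"
// b is roughly: {"name":"multihost","id":"mhid","network_type":"overlay",
//   "ipv4_configuration":[{"PreferredPool":"10.1.0.0/16","SubPool":"10.1.1.0/24",...}],
//   "driver_opts":{"com.docker.network.driver.overlay.vxlanid_list":"1024"},"network_opts":null}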

View file

@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"net/http"
"strings"
"text/tabwriter"
flag "github.com/docker/docker/pkg/mflag"
@ -42,8 +43,13 @@ func (cli *NetworkCli) CmdNetwork(chain string, args ...string) error {
func (cli *NetworkCli) CmdNetworkCreate(chain string, args ...string) error {
cmd := cli.Subcmd(chain, "create", "NETWORK-NAME", "Creates a new network with a name specified by the user", false)
flDriver := cmd.String([]string{"d", "-driver"}, "", "Driver to manage the Network")
flID := cmd.String([]string{"-id"}, "", "Network ID string")
flOpts := cmd.String([]string{"o", "-opt"}, "", "Network options")
flInternal := cmd.Bool([]string{"-internal"}, false, "Config the network to be internal")
flIPv6 := cmd.Bool([]string{"-ipv6"}, false, "Enable IPv6 on the network")
flSubnet := cmd.String([]string{"-subnet"}, "", "Subnet option")
flRange := cmd.String([]string{"-ip-range"}, "", "Range option")
cmd.Require(flag.Exact, 1)
err := cmd.ParseFlags(args, true)
if err != nil {
@ -56,9 +62,30 @@ func (cli *NetworkCli) CmdNetworkCreate(chain string, args ...string) error {
if *flIPv6 {
networkOpts[netlabel.EnableIPv6] = "true"
}
driverOpts := make(map[string]string)
if *flOpts != "" {
opts := strings.Split(*flOpts, ",")
for _, opt := range opts {
driverOpts[netlabel.Key(opt)] = netlabel.Value(opt)
}
}
var icList []ipamConf
if *flSubnet != "" {
ic := ipamConf{
PreferredPool: *flSubnet,
}
if *flRange != "" {
ic.SubPool = *flRange
}
icList = append(icList, ic)
}
// Construct network create request body
var driverOpts []string
nc := networkCreate{Name: cmd.Arg(0), NetworkType: *flDriver, DriverOpts: driverOpts, NetworkOpts: networkOpts}
nc := networkCreate{Name: cmd.Arg(0), NetworkType: *flDriver, ID: *flID, IPv4Conf: icList, DriverOpts: driverOpts, NetworkOpts: networkOpts}
obj, _, err := readBody(cli.call("POST", "/networks", nc, nil))
if err != nil {
return err

View file

@ -31,12 +31,20 @@ type SandboxResource struct {
/***********
Body types
************/
type ipamConf struct {
PreferredPool string
SubPool string
Gateway string
AuxAddresses map[string]string
}
// networkCreate is the expected body of the "create network" http request message
type networkCreate struct {
Name string `json:"name"`
ID string `json:"id"`
NetworkType string `json:"network_type"`
DriverOpts []string `json:"driver_opts"`
IPv4Conf []ipamConf `json:"ipv4_configuration"`
DriverOpts map[string]string `json:"driver_opts"`
NetworkOpts map[string]string `json:"network_opts"`
}

View file

@ -91,6 +91,15 @@ func processConfig(cfg *config.Config) []config.Option {
dd = cfg.Daemon.DefaultDriver
}
options = append(options, config.OptionDefaultDriver(dd))
if cfg.Daemon.IsAgent {
options = append(options, config.OptionAgent())
}
if cfg.Daemon.Bind != "" {
options = append(options, config.OptionBind(cfg.Daemon.Bind))
}
options = append(options, config.OptionNeighbors(cfg.Daemon.Neighbors))
if cfg.Daemon.Labels != nil {
options = append(options, config.OptionLabels(cfg.Daemon.Labels))

View file

@ -22,9 +22,12 @@ type Config struct {
// DaemonCfg represents libnetwork core configuration
type DaemonCfg struct {
Debug bool
IsAgent bool
DataDir string
DefaultNetwork string
DefaultDriver string
Bind string
Neighbors []string
Labels []string
DriverCfg map[string]interface{}
}
@ -81,6 +84,27 @@ func ParseConfigOptions(cfgOptions ...Option) *Config {
// to the controller
type Option func(c *Config)
// OptionBind function returns an option setter for setting a bind interface or address
func OptionBind(bind string) Option {
return func(c *Config) {
c.Daemon.Bind = bind
}
}
// OptionAgent function returns an option setter for setting agent mode
func OptionAgent() Option {
return func(c *Config) {
c.Daemon.IsAgent = true
}
}
// OptionNeighbors function returns an option setter for setting a list of neighbors to join.
func OptionNeighbors(neighbors []string) Option {
return func(c *Config) {
c.Daemon.Neighbors = neighbors
}
}
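Taken together, these options are how a daemon opts into agent mode: processConfig in cmd/dnet maps the isagent, bind, and neighbors TOML keys onto them, and the controller's New (shown further down) then calls agentInit and agentJoin. A minimal sketch of doing the same programmatically; the interface name and neighbor address are placeholders:
// Illustrative only: creating an agent-mode controller directly.
package main

import (
	"log"

	"github.com/docker/libnetwork"
	"github.com/docker/libnetwork/config"
)

func main() {
	c, err := libnetwork.New(
		config.OptionAgent(),
		config.OptionBind("eth0"),
		config.OptionNeighbors([]string{"192.168.0.5"}),
	)
	if err != nil {
		log.Fatalf("failed to create network controller: %v", err)
	}
	defer c.Stop()
}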
// OptionDefaultNetwork function returns an option setter for a default network
func OptionDefaultNetwork(dn string) Option {
return func(c *Config) {

View file

@ -136,6 +136,7 @@ type controller struct {
nmap map[string]*netWatch
defOsSbox osl.Sandbox
sboxOnce sync.Once
agent *agent
sync.Mutex
}
@ -153,6 +154,14 @@ func New(cfgOptions ...config.Option) (NetworkController, error) {
svcDb: make(map[string]svcInfo),
}
if err := c.agentInit(c.cfg.Daemon.Bind); err != nil {
return nil, err
}
if err := c.agentJoin(c.cfg.Daemon.Neighbors); err != nil {
return nil, err
}
if err := c.initStores(); err != nil {
return nil, err
}
@ -235,6 +244,28 @@ func (c *controller) makeDriverConfig(ntype string) map[string]interface{} {
var procReloadConfig = make(chan (bool), 1)
func (c *controller) processAgentConfig(cfg *config.Config) (bool, error) {
if c.cfg.Daemon.IsAgent == cfg.Daemon.IsAgent {
// Agent configuration not changed
return false, nil
}
c.Lock()
c.cfg = cfg
c.Unlock()
if err := c.agentInit(c.cfg.Daemon.Bind); err != nil {
return false, err
}
if err := c.agentJoin(c.cfg.Daemon.Neighbors); err != nil {
c.agentClose()
return false, err
}
return true, nil
}
func (c *controller) ReloadConfiguration(cfgOptions ...config.Option) error {
procReloadConfig <- true
defer func() { <-procReloadConfig }()
@ -243,6 +274,16 @@ func (c *controller) ReloadConfiguration(cfgOptions ...config.Option) error {
// Refuse the configuration if it alters an existing datastore client configuration.
update := false
cfg := config.ParseConfigOptions(cfgOptions...)
isAgentConfig, err := c.processAgentConfig(cfg)
if err != nil {
return err
}
if isAgentConfig {
return nil
}
for s := range c.cfg.Scopes {
if _, ok := cfg.Scopes[s]; !ok {
return types.ForbiddenErrorf("cannot accept new configuration because it removes an existing datastore client")
@ -265,16 +306,6 @@ func (c *controller) ReloadConfiguration(cfgOptions ...config.Option) error {
return nil
}
c.Lock()
c.cfg = cfg
c.Unlock()
if c.discovery == nil && c.cfg.Cluster.Watcher != nil {
if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil {
log.Errorf("Failed to Initialize Discovery after configuration update: %v", err)
}
}
var dsConfig *discoverapi.DatastoreConfigData
for scope, sCfg := range cfg.Scopes {
if scope == datastore.LocalScope || !sCfg.IsValid() {
@ -308,6 +339,12 @@ func (c *controller) ReloadConfiguration(cfgOptions ...config.Option) error {
return false
})
if c.discovery == nil && c.cfg.Cluster.Watcher != nil {
if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil {
log.Errorf("Failed to Initialize Discovery after configuration update: %v", err)
}
}
return nil
}
@ -421,6 +458,7 @@ func (c *controller) RegisterDriver(networkType string, driver driverapi.Driver,
c.pushNodeDiscovery(driver, capability, hd.Fetch(), true)
}
c.agentDriverNotify(driver)
return nil
}
@ -465,7 +503,8 @@ func (c *controller) NewNetwork(networkType, name string, id string, options ...
}
}()
if err = c.addNetwork(network); err != nil {
err = c.addNetwork(network)
if err != nil {
return nil, err
}
defer func() {
@ -496,6 +535,12 @@ func (c *controller) NewNetwork(networkType, name string, id string, options ...
return nil, err
}
if err = network.joinCluster(); err != nil {
log.Errorf("Failed to join network %s into agent cluster: %v", name, err)
}
network.addDriverWatches()
return network, nil
}
@ -506,7 +551,7 @@ func (c *controller) addNetwork(n *network) error {
}
// Create the network
if err := d.CreateNetwork(n.id, n.generic, nil, n.getIPData(4), n.getIPData(6)); err != nil {
if err := d.CreateNetwork(n.id, n.generic, n, n.getIPData(4), n.getIPData(6)); err != nil {
return err
}

View file

@ -147,10 +147,14 @@ func (d *driver) nodeJoin(node string, self bool) {
d.Lock()
d.bindAddress = node
d.Unlock()
err := d.serfInit()
if err != nil {
logrus.Errorf("initializing serf instance failed: %v", err)
return
// If there is no cluster store there is no need to start serf.
if d.store != nil {
err := d.serfInit()
if err != nil {
logrus.Errorf("initializing serf instance failed: %v", err)
return
}
}
}

View file

@ -5,11 +5,18 @@ import (
"testing"
"time"
"github.com/docker/libkv/store/consul"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/discoverapi"
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/netlabel"
_ "github.com/docker/libnetwork/testutils"
)
func init() {
consul.Register()
}
type driverTester struct {
t *testing.T
d *driver
@ -19,7 +26,14 @@ const testNetworkType = "overlay"
func setupDriver(t *testing.T) *driverTester {
dt := &driverTester{t: t}
if err := Init(dt, nil); err != nil {
config := make(map[string]interface{})
config[netlabel.GlobalKVClient] = discoverapi.DatastoreConfigData{
Scope: datastore.GlobalScope,
Provider: "consul",
Address: "127.0.0.1:8500",
}
if err := Init(dt, config); err != nil {
t.Fatal(err)
}

View file

@ -446,6 +446,10 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) error {
return err
}
if e := ep.addToCluster(); e != nil {
log.Errorf("Could not update state for endpoint %s into cluster: %v", ep.Name(), e)
}
if sb.needDefaultGW() && sb.getEndpointInGWNetwork() == nil {
return sb.setupDefaultGW()
}
@ -632,6 +636,10 @@ func (ep *endpoint) sbLeave(sb *sandbox, force bool, options ...EndpointOption)
return err
}
if e := ep.deleteFromCluster(); e != nil {
log.Errorf("Could not delete state for endpoint %s from cluster: %v", ep.Name(), e)
}
sb.deleteHostsEntries(n.getSvcRecords(ep))
if !sb.inDelete && sb.needDefaultGW() && sb.getEndpointInGWNetwork() == nil {
return sb.setupDefaultGW()

View file

@ -143,9 +143,16 @@ type endpointJoinInfo struct {
gw net.IP
gw6 net.IP
StaticRoutes []*types.StaticRoute
driverTableEntries []*tableEntry
disableGatewayService bool
}
type tableEntry struct {
tableName string
key string
value []byte
}
func (ep *endpoint) Info() EndpointInfo {
n, err := ep.getNetworkFromStore()
if err != nil {
@ -293,6 +300,15 @@ func (ep *endpoint) AddStaticRoute(destination *net.IPNet, routeType int, nextHo
}
func (ep *endpoint) AddTableEntry(tableName, key string, value []byte) error {
ep.Lock()
defer ep.Unlock()
ep.joinInfo.driverTableEntries = append(ep.joinInfo.driverTableEntries, &tableEntry{
tableName: tableName,
key: key,
value: value,
})
return nil
}
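AddTableEntry lets a driver attach arbitrary table entries to an endpoint while it joins a sandbox; addToCluster in agent.go then pushes those entries into NetworkDB. A hedged sketch of the producing side, assuming the driver is handed an object that exposes this method (the table name, key, and value are invented):
// tableWriter is a stand-in for whatever join-info handle the driver receives.
type tableWriter interface {
	AddTableEntry(tableName, key string, value []byte) error
}

// publishPeerEntry records one driver-owned table entry for the endpoint;
// it is then gossiped to other agents and replayed to drivers via EventNotify.
func publishPeerEntry(j tableWriter, eid string, mac []byte) error {
	return j.AddTableEntry("example_peer_table", eid, mac)
}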

View file

@ -171,6 +171,7 @@ type network struct {
drvOnce *sync.Once
internal bool
inDelete bool
driverTables []string
sync.Mutex
}
@ -662,8 +663,15 @@ func (n *network) driver(load bool) (driverapi.Driver, error) {
return nil, err
}
c := n.getController()
n.Lock()
n.scope = cap.DataScope
if c.cfg.Daemon.IsAgent {
// If we are running in agent mode then all networks
// in libnetwork are local scope regardless of the
// backing driver.
n.scope = datastore.LocalScope
}
n.Unlock()
return d, nil
}
@ -720,6 +728,12 @@ func (n *network) delete(force bool) error {
return fmt.Errorf("error deleting network from store: %v", err)
}
n.cancelDriverWatches()
if err = n.leaveCluster(); err != nil {
log.Errorf("Failed leaving network %s from the agent cluster: %v", n.Name(), err)
}
return nil
}
@ -1429,3 +1443,11 @@ func (n *network) Labels() map[string]string {
return lbls
}
func (n *network) TableEventRegister(tableName string) error {
n.Lock()
defer n.Unlock()
n.driverTables = append(n.driverTables, tableName)
return nil
}
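TableEventRegister is the registration side of addDriverWatches in agent.go: every table name recorded here gets a NetworkDB watch, and its entries flow back to the driver through EventNotify. A hedged sketch of a driver registering a table from its CreateNetwork hook, assuming the handle passed as the third argument (controller.addNetwork above now passes the network itself instead of nil) exposes this method; the driver type and table name are invented:
// tableRegistrar is a stand-in for the network-info handle a driver receives.
type tableRegistrar interface {
	TableEventRegister(tableName string) error
}

type exampleDriver struct{}

// registerTables asks libnetwork to watch a driver-owned table; existing
// entries are then replayed to the driver and future ones arrive as events.
func (d *exampleDriver) registerTables(nInfo tableRegistrar) error {
	return nInfo.TableEventRegister("example_fdb_table")
}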

View file

@ -38,9 +38,11 @@ func (nDB *NetworkDB) clusterInit() error {
config := memberlist.DefaultLANConfig()
config.Name = nDB.config.NodeName
config.BindAddr = nDB.config.BindAddr
if nDB.config.BindPort != 0 {
config.BindPort = nDB.config.BindPort
}
config.ProtocolVersion = memberlist.ProtocolVersionMax
config.Delegate = &delegate{nDB: nDB}
config.Events = &eventDelegate{nDB: nDB}

View file

@ -10,6 +10,10 @@ function dnet_container_name() {
echo dnet-$1-$2
}
function dnet_container_ip() {
docker inspect --format '{{.NetworkSettings.IPAddress}}' dnet-$1-$2
}
function get_sbox_id() {
local line
@ -18,17 +22,17 @@ function get_sbox_id() {
}
function net_connect() {
local al gl
if [ -n "$4" ]; then
if [ "${4}" != ":" ]; then
al="--alias=${4}"
fi
fi
if [ -n "$5" ]; then
gl="--alias=${5}"
fi
dnet_cmd $(inst_id2port ${1}) service publish $gl ${2}.${3}
dnet_cmd $(inst_id2port ${1}) service attach $al ${2} ${2}.${3}
}
function net_disconnect() {
@ -107,7 +111,7 @@ function parse_discovery_str() {
}
function start_dnet() {
local inst suffix name hport cport hopt store bridge_ip labels tomlfile
local inst suffix name hport cport hopt store bridge_ip labels tomlfile nip
local discovery provider address
inst=$1
@ -115,13 +119,16 @@ function start_dnet() {
suffix=$1
shift
stop_dnet ${inst} ${suffix}
name=$(dnet_container_name ${inst} ${suffix})
store=$(echo $suffix | cut -d":" -f1)
nip=$(echo $suffix | cut -s -d":" -f2)
stop_dnet ${inst} ${store}
name=$(dnet_container_name ${inst} ${store})
hport=$((41000+${inst}-1))
cport=2385
hopt=""
store=${suffix}
while [ -n "$1" ]
do
@ -138,21 +145,32 @@ function start_dnet() {
bridge_ip=$(get_docker_bridge_ip)
echo "start_dnet parsed values: " ${inst} ${suffix} ${name} ${hport} ${cport} ${hopt} ${store} ${labels}
echo "start_dnet parsed values: " ${inst} ${suffix} ${name} ${hport} ${cport} ${hopt} ${store}
mkdir -p /tmp/dnet/${name}
tomlfile="/tmp/dnet/${name}/libnetwork.toml"
# Try discovery URLs with or without path
neigh_ip=""
neighbors=""
if [ "$store" = "zookeeper" ]; then
read discovery provider address < <(parse_discovery_str zk://${bridge_ip}:2182)
elif [ "$store" = "etcd" ]; then
read discovery provider address < <(parse_discovery_str etcd://${bridge_ip}:42000/custom_prefix)
else
elif [ "$store" = "consul" ]; then
read discovery provider address < <(parse_discovery_str consul://${bridge_ip}:8500/custom_prefix)
else
if [ "$nip" != "" ]; then
neighbors="neighbors = [\"${nip}:7946\"]"
fi
discovery=""
provider=""
address=""
fi
cat > ${tomlfile} <<EOF
if [ "$discovery" != "" ]; then
cat > ${tomlfile} <<EOF
title = "LibNetwork Configuration file for ${name}"
[daemon]
@ -166,9 +184,22 @@ title = "LibNetwork Configuration file for ${name}"
provider = "${provider}"
address = "${address}"
EOF
else
cat > ${tomlfile} <<EOF
title = "LibNetwork Configuration file for ${name}"
[daemon]
debug = false
isagent = true
bind = "eth0"
${neighbors}
EOF
fi
cat ${tomlfile}
docker run \
-d \
--hostname=${name} \
--name=${name} \
--privileged \
-p ${hport}:${cport} \
@ -183,6 +214,19 @@ EOF
wait_for_dnet $(inst_id2port ${inst}) ${name}
}
function start_ovrouter() {
local name=${1}
local parent=${2}
docker run \
-d \
--name=${name} \
--net=container:${parent} \
--volumes-from ${parent} \
-w /go/src/github.com/docker/libnetwork \
mrjana/golang ./cmd/ovrouter/ovrouter eth0
}
function skip_for_circleci() {
if [ -n "$CIRCLECI" ]; then
skip
@ -289,7 +333,7 @@ function test_overlay() {
end=3
# Setup overlay network and connect containers to it
if [ -z "${2}" -o "${2}" != "skip_add" ]; then
if [ -z "${2}" -o "${2}" != "internal" ]; then
dnet_cmd $(inst_id2port 1) network create -d overlay multihost
else
dnet_cmd $(inst_id2port 1) network create -d overlay --internal multihost
@ -307,7 +351,7 @@ function test_overlay() {
do
if [ -z "${2}" -o "${2}" != "internal" ]; then
runc $(dnet_container_name $i $dnet_suffix) $(get_sbox_id ${i} container_${i}) \
"ping -c 1 www.google.com"
"ping -c 1 www.google.com"
else
default_route=`runc $(dnet_container_name $i $dnet_suffix) $(get_sbox_id ${i} container_${i}) "ip route | grep default"`
[ "$default_route" = "" ]
@ -324,33 +368,33 @@ function test_overlay() {
# Setup bridge network and connect containers to it
if [ -z "${2}" -o "${2}" != "skip_add" ]; then
if [ -z "${2}" -o "${2}" != "internal" ]; then
dnet_cmd $(inst_id2port 1) network create -d bridge br1
dnet_cmd $(inst_id2port 1) network create -d bridge br2
net_connect ${start} container_${start} br1
net_connect ${start} container_${start} br2
# Make sure external connectivity works
runc $(dnet_container_name ${start} $dnet_suffix) $(get_sbox_id ${start} container_${start}) \
"ping -c 1 www.google.com"
net_disconnect ${start} container_${start} br1
net_disconnect ${start} container_${start} br2
# Make sure external connectivity works
runc $(dnet_container_name ${start} $dnet_suffix) $(get_sbox_id ${start} container_${start}) \
"ping -c 1 www.google.com"
dnet_cmd $(inst_id2port 1) network rm br1
dnet_cmd $(inst_id2port 1) network rm br2
# Disconnect from overlay network
net_disconnect ${start} container_${start} multihost
# Connect to overlay network again
net_connect ${start} container_${start} multihost
# Make sure external connectivity still works
runc $(dnet_container_name ${start} $dnet_suffix) $(get_sbox_id ${start} container_${start}) \
"ping -c 1 www.google.com"
fi
fi

View file

@ -0,0 +1,58 @@
# -*- mode: sh -*-
#!/usr/bin/env bats
load helpers
function test_overlay_local() {
dnet_suffix=$1
echo $(docker ps)
start=1
end=3
for i in `seq ${start} ${end}`;
do
echo "iteration count ${i}"
dnet_cmd $(inst_id2port $i) network create -d overlay --id=mhid --subnet=10.1.0.0/16 --ip-range=10.1.${i}.0/24 --opt=com.docker.network.driver.overlay.vxlanid_list=1024 multihost
dnet_cmd $(inst_id2port $i) container create container_${i}
net_connect ${i} container_${i} multihost
done
# Now test connectivity between all the containers using service names
for i in `seq ${start} ${end}`;
do
if [ -z "${2}" -o "${2}" != "internal" ]; then
runc $(dnet_container_name $i $dnet_suffix) $(get_sbox_id ${i} container_${i}) \
"ping -c 1 www.google.com"
else
default_route=`runc $(dnet_container_name $i $dnet_suffix) $(get_sbox_id ${i} container_${i}) "ip route | grep default"`
[ "$default_route" = "" ]
fi
for j in `seq ${start} ${end}`;
do
if [ "$i" -eq "$j" ]; then
continue
fi
#runc $(dnet_container_name $i $dnet_suffix) $(get_sbox_id ${i} container_${i}) "ping -c 1 10.1.${j}.1"
runc $(dnet_container_name $i $dnet_suffix) $(get_sbox_id ${i} container_${i}) "ping -c 1 container_${j}"
done
done
# Teardown the container connections and the network
for i in `seq ${start} ${end}`;
do
net_disconnect ${i} container_${i} multihost
dnet_cmd $(inst_id2port $i) container rm container_${i}
done
if [ -z "${2}" -o "${2}" != "skip_rm" ]; then
dnet_cmd $(inst_id2port 2) network rm multihost
fi
}
@test "Test overlay network in local scope" {
skip_for_circleci
test_overlay_local local
}
#"ping -c 1 10.1.${j}.1"

View file

@ -34,6 +34,28 @@ function run_bridge_tests() {
unset cmap[dnet-1-bridge]
}
function run_overlay_local_tests() {
## Test overlay network in local scope
## Setup
start_dnet 1 local 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-1-local]=dnet-1-local
start_dnet 2 local:$(dnet_container_ip 1 local) 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-2-local]=dnet-2-local
start_dnet 3 local:$(dnet_container_ip 1 local) 1>>${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet-3-local]=dnet-3-local
## Run the test cases
./integration-tmp/bin/bats ./test/integration/dnet/overlay-local.bats
## Teardown
stop_dnet 1 local 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-1-local]
stop_dnet 2 local 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-2-local]
stop_dnet 3 local 1>>${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-3-local]
}
function run_overlay_consul_tests() {
## Test overlay network with consul
## Setup