Merge pull request #44193 from thaJeztah/libnetwork_cleanup

libnetwork: cleanup config package, remove old integration tests
This commit is contained in:
Sebastiaan van Stijn 2022-09-27 22:41:32 +02:00 committed by GitHub
commit 173d16b233
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
63 changed files with 69 additions and 1546 deletions

View File

@ -128,7 +128,7 @@ func (iface *lnInterface) Set(s *specs.Spec) error {
s.Hooks = &specs.Hooks{
Prestart: []specs.Hook{{
Path: filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe"),
Args: []string{"libnetwork-setkey", "-exec-root=" + iface.provider.Config().Daemon.ExecRoot, iface.sbx.ContainerID(), shortNetCtlrID},
Args: []string{"libnetwork-setkey", "-exec-root=" + iface.provider.Config().ExecRoot, iface.sbx.ContainerID(), shortNetCtlrID},
}},
}
return nil

View File

@ -465,7 +465,7 @@ func (daemon *Daemon) updateContainerNetworkSettings(container *container.Contai
networkName := mode.NetworkName()
if mode.IsDefault() {
networkName = daemon.netController.Config().Daemon.DefaultNetwork
networkName = daemon.netController.Config().DefaultNetwork
}
if mode.IsUserDefined() {

View File

@ -1374,7 +1374,6 @@ func (daemon *Daemon) networkOptions(pg plugingetter.PluginGetter, activeSandbox
dd := runconfig.DefaultDaemonNetworkMode()
options = []nwconfig.Option{
nwconfig.OptionExperimental(conf.Experimental),
nwconfig.OptionDataDir(conf.Root),
nwconfig.OptionExecRoot(conf.GetExecRoot()),
nwconfig.OptionDefaultDriver(string(dd)),

View File

@ -108,7 +108,7 @@ func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error)
return nil, libnetwork.ErrNoSuchNetwork(name)
}
if name == "" {
name = c.Config().Daemon.DefaultNetwork
name = c.Config().DefaultNetwork
}
return c.NetworkByName(name)
}
@ -316,7 +316,7 @@ func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string
c := daemon.netController
driver := create.Driver
if driver == "" {
driver = c.Config().Daemon.DefaultDriver
driver = c.Config().DefaultDriver
}
nwOptions := []libnetwork.NetworkOption{

View File

@ -223,7 +223,7 @@ func (c *controller) agentSetup(clusterProvider cluster.Provider) error {
listenAddr, _, _ := net.SplitHostPort(listen)
logrus.Infof("Initializing Libnetwork Agent Listen-Addr=%s Local-addr=%s Adv-addr=%s Data-addr=%s Remote-addr-list=%v MTU=%d",
listenAddr, bindAddr, advAddr, dataAddr, remoteAddrList, c.Config().Daemon.NetworkControlPlaneMTU)
listenAddr, bindAddr, advAddr, dataAddr, remoteAddrList, c.Config().NetworkControlPlaneMTU)
if advAddr != "" && agent == nil {
if err := c.agentInit(listenAddr, bindAddr, advAddr, dataAddr); err != nil {
logrus.Errorf("error in agentInit: %v", err)
@ -295,12 +295,12 @@ func (c *controller) agentInit(listenAddr, bindAddrOrInterface, advertiseAddr, d
netDBConf.BindAddr = listenAddr
netDBConf.AdvertiseAddr = advertiseAddr
netDBConf.Keys = keys
if c.Config().Daemon.NetworkControlPlaneMTU != 0 {
if c.Config().NetworkControlPlaneMTU != 0 {
// Consider the MTU remove the IP hdr (IPv4 or IPv6) and the TCP/UDP hdr.
// To be on the safe side let's cut 100 bytes
netDBConf.PacketBufferSize = (c.Config().Daemon.NetworkControlPlaneMTU - 100)
netDBConf.PacketBufferSize = (c.Config().NetworkControlPlaneMTU - 100)
logrus.Debugf("Control plane MTU: %d will initialize NetworkDB with: %d",
c.Config().Daemon.NetworkControlPlaneMTU, netDBConf.PacketBufferSize)
c.Config().NetworkControlPlaneMTU, netDBConf.PacketBufferSize)
}
nDB, err := networkdb.New(netDBConf)
if err != nil {
@ -895,7 +895,6 @@ func (c *controller) handleNodeTableEvent(ev events.Event) {
return
}
c.processNodeDiscovery([]net.IP{nodeAddr.Addr}, isAdd)
}
func (c *controller) handleEpTableEvent(ev events.Event) {

View File

@ -410,7 +410,6 @@ func (h *Handle) Destroy() error {
// ToByteArray converts this handle's data into a byte array
func (h *Handle) ToByteArray() ([]byte, error) {
h.Lock()
defer h.Unlock()
ba := make([]byte, 16)

View File

@ -1,7 +1,6 @@
package config
import (
"os"
"strings"
"github.com/docker/docker/libnetwork/cluster"
@ -11,7 +10,6 @@ import (
"github.com/docker/docker/libnetwork/osl"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/libkv/store"
"github.com/pelletier/go-toml"
"github.com/sirupsen/logrus"
)
@ -22,16 +20,6 @@ const (
// Config encapsulates configurations of various Libnetwork components
type Config struct {
Daemon DaemonCfg
Scopes map[string]*datastore.ScopeCfg
ActiveSandboxes map[string]interface{}
PluginGetter plugingetter.PluginGetter
}
// DaemonCfg represents libnetwork core configuration
type DaemonCfg struct {
Debug bool
Experimental bool
DataDir string
ExecRoot string
DefaultNetwork string
@ -41,48 +29,30 @@ type DaemonCfg struct {
ClusterProvider cluster.Provider
NetworkControlPlaneMTU int
DefaultAddressPool []*ipamutils.NetworkToSplit
Scopes map[string]*datastore.ScopeCfg
ActiveSandboxes map[string]interface{}
PluginGetter plugingetter.PluginGetter
}
// LoadDefaultScopes loads default scope configs for scopes which
// doesn't have explicit user specified configs.
func (c *Config) LoadDefaultScopes(dataDir string) {
for k, v := range datastore.DefaultScopes(dataDir) {
if _, ok := c.Scopes[k]; !ok {
c.Scopes[k] = v
// New creates a new Config and initializes it with the given Options.
func New(opts ...Option) *Config {
cfg := &Config{
DriverCfg: make(map[string]interface{}),
Scopes: make(map[string]*datastore.ScopeCfg),
}
for _, opt := range opts {
if opt != nil {
opt(cfg)
}
}
}
// ParseConfig parses the libnetwork configuration file
func ParseConfig(tomlCfgFile string) (*Config, error) {
cfg := &Config{
Scopes: map[string]*datastore.ScopeCfg{},
// load default scope configs which don't have explicit user specified configs.
for k, v := range datastore.DefaultScopes(cfg.DataDir) {
if _, ok := cfg.Scopes[k]; !ok {
cfg.Scopes[k] = v
}
}
data, err := os.ReadFile(tomlCfgFile)
if err != nil {
return nil, err
}
if err := toml.Unmarshal(data, cfg); err != nil {
return nil, err
}
cfg.LoadDefaultScopes(cfg.Daemon.DataDir)
return cfg, nil
}
// ParseConfigOptions parses the configuration options and returns
// a reference to the corresponding Config structure
func ParseConfigOptions(cfgOptions ...Option) *Config {
cfg := &Config{
Daemon: DaemonCfg{
DriverCfg: make(map[string]interface{}),
},
Scopes: make(map[string]*datastore.ScopeCfg),
}
cfg.ProcessOptions(cfgOptions...)
cfg.LoadDefaultScopes(cfg.Daemon.DataDir)
return cfg
}
@ -94,7 +64,7 @@ type Option func(c *Config)
func OptionDefaultNetwork(dn string) Option {
return func(c *Config) {
logrus.Debugf("Option DefaultNetwork: %s", dn)
c.Daemon.DefaultNetwork = strings.TrimSpace(dn)
c.DefaultNetwork = strings.TrimSpace(dn)
}
}
@ -102,21 +72,21 @@ func OptionDefaultNetwork(dn string) Option {
func OptionDefaultDriver(dd string) Option {
return func(c *Config) {
logrus.Debugf("Option DefaultDriver: %s", dd)
c.Daemon.DefaultDriver = strings.TrimSpace(dd)
c.DefaultDriver = strings.TrimSpace(dd)
}
}
// OptionDefaultAddressPoolConfig function returns an option setter for default address pool
func OptionDefaultAddressPoolConfig(addressPool []*ipamutils.NetworkToSplit) Option {
return func(c *Config) {
c.Daemon.DefaultAddressPool = addressPool
c.DefaultAddressPool = addressPool
}
}
// OptionDriverConfig returns an option setter for driver configuration.
func OptionDriverConfig(networkType string, config map[string]interface{}) Option {
return func(c *Config) {
c.Daemon.DriverCfg[networkType] = config
c.DriverCfg[networkType] = config
}
}
@ -125,7 +95,7 @@ func OptionLabels(labels []string) Option {
return func(c *Config) {
for _, label := range labels {
if strings.HasPrefix(label, netlabel.Prefix) {
c.Daemon.Labels = append(c.Daemon.Labels, label)
c.Labels = append(c.Labels, label)
}
}
}
@ -134,14 +104,14 @@ func OptionLabels(labels []string) Option {
// OptionDataDir function returns an option setter for data folder
func OptionDataDir(dataDir string) Option {
return func(c *Config) {
c.Daemon.DataDir = dataDir
c.DataDir = dataDir
}
}
// OptionExecRoot function returns an option setter for exec root folder
func OptionExecRoot(execRoot string) Option {
return func(c *Config) {
c.Daemon.ExecRoot = execRoot
c.ExecRoot = execRoot
osl.SetBasePath(execRoot)
}
}
@ -153,14 +123,6 @@ func OptionPluginGetter(pg plugingetter.PluginGetter) Option {
}
}
// OptionExperimental function returns an option setter for experimental daemon
func OptionExperimental(exp bool) Option {
return func(c *Config) {
logrus.Debugf("Option Experimental: %v", exp)
c.Daemon.Experimental = exp
}
}
// OptionNetworkControlPlaneMTU function returns an option setter for control plane MTU
func OptionNetworkControlPlaneMTU(exp int) Option {
return func(c *Config) {
@ -172,16 +134,7 @@ func OptionNetworkControlPlaneMTU(exp int) Option {
exp = minimumNetworkControlPlaneMTU
}
}
c.Daemon.NetworkControlPlaneMTU = exp
}
}
// ProcessOptions processes options and stores it in config
func (c *Config) ProcessOptions(options ...Option) {
for _, opt := range options {
if opt != nil {
opt(c)
}
c.NetworkControlPlaneMTU = exp
}
}

View File

@ -7,20 +7,6 @@ import (
"github.com/docker/docker/libnetwork/netlabel"
)
func TestInvalidConfig(t *testing.T) {
_, err := ParseConfig("invalid.toml")
if err == nil {
t.Fatal("Invalid Configuration file must fail")
}
}
func TestConfig(t *testing.T) {
_, err := ParseConfig("libnetwork.toml")
if err != nil {
t.Fatal("Error parsing a valid configuration file :", err)
}
}
func TestOptionsLabels(t *testing.T) {
c := &Config{}
l := []string{
@ -31,10 +17,10 @@ func TestOptionsLabels(t *testing.T) {
}
f := OptionLabels(l)
f(c)
if len(c.Daemon.Labels) != 3 {
t.Fatalf("Expecting 3 labels, seen %d", len(c.Daemon.Labels))
if len(c.Labels) != 3 {
t.Fatalf("Expecting 3 labels, seen %d", len(c.Labels))
}
for _, l := range c.Daemon.Labels {
for _, l := range c.Labels {
if !strings.HasPrefix(l, netlabel.Prefix) {
t.Fatalf("config must accept only libnetwork labels. Not : %s", l)
}

View File

@ -1,9 +0,0 @@
title = "LibNetwork Configuration file"
[daemon]
debug = false
[cluster]
discovery = "token://swarm-discovery-token"
Address = "Cluster-wide reachable Host IP"
[datastore]
embedded = false

View File

@ -186,7 +186,7 @@ type initializer struct {
func New(cfgOptions ...config.Option) (NetworkController, error) {
c := &controller{
id: stringid.GenerateRandomID(),
cfg: config.ParseConfigOptions(cfgOptions...),
cfg: config.New(cfgOptions...),
sandboxes: sandboxTable{},
svcRecords: make(map[string]svcInfo),
serviceBindings: make(map[serviceKey]*service),
@ -205,7 +205,7 @@ func New(cfgOptions ...config.Option) (NetworkController, error) {
return nil, err
}
for _, i := range getInitializers(c.cfg.Daemon.Experimental) {
for _, i := range getInitializers() {
var dcfg map[string]interface{}
// External plugins don't need config passed through daemon. They can
@ -219,7 +219,7 @@ func New(cfgOptions ...config.Option) (NetworkController, error) {
}
}
if err = initIPAMDrivers(drvRegistry, nil, c.getStore(datastore.GlobalScope), c.cfg.Daemon.DefaultAddressPool); err != nil {
if err = initIPAMDrivers(drvRegistry, nil, c.getStore(datastore.GlobalScope), c.cfg.DefaultAddressPool); err != nil {
return nil, err
}
@ -249,12 +249,12 @@ func (c *controller) SetClusterProvider(provider cluster.Provider) {
var sameProvider bool
c.Lock()
// Avoids to spawn multiple goroutine for the same cluster provider
if c.cfg.Daemon.ClusterProvider == provider {
if c.cfg.ClusterProvider == provider {
// If the cluster provider is already set, there is already a go routine spawned
// that is listening for events, so nothing to do here
sameProvider = true
} else {
c.cfg.Daemon.ClusterProvider = provider
c.cfg.ClusterProvider = provider
}
c.Unlock()
@ -301,7 +301,7 @@ func (c *controller) getAgent() *agent {
}
func (c *controller) clusterAgentInit() {
clusterProvider := c.cfg.Daemon.ClusterProvider
clusterProvider := c.cfg.ClusterProvider
var keysAvailable bool
for {
eventType := <-clusterProvider.ListenClusterEvents()
@ -408,7 +408,7 @@ func (c *controller) makeDriverConfig(ntype string) map[string]interface{} {
config := make(map[string]interface{})
for _, label := range c.cfg.Daemon.Labels {
for _, label := range c.cfg.Labels {
if !strings.HasPrefix(netlabel.Key(label), netlabel.DriverPrefix+"."+ntype) {
continue
}
@ -416,7 +416,7 @@ func (c *controller) makeDriverConfig(ntype string) map[string]interface{} {
config[netlabel.Key(label)] = netlabel.Value(label)
}
drvCfg, ok := c.cfg.Daemon.DriverCfg[ntype]
drvCfg, ok := c.cfg.DriverCfg[ntype]
if ok {
for k, v := range drvCfg.(map[string]interface{}) {
config[k] = v
@ -447,7 +447,7 @@ func (c *controller) ReloadConfiguration(cfgOptions ...config.Option) error {
// For now we accept the configuration reload only as a mean to provide a global store config after boot.
// Refuse the configuration if it alters an existing datastore client configuration.
update := false
cfg := config.ParseConfigOptions(cfgOptions...)
cfg := config.New(cfgOptions...)
for s := range c.cfg.Scopes {
if _, ok := cfg.Scopes[s]; !ok {
@ -580,19 +580,19 @@ func (c *controller) Config() config.Config {
func (c *controller) isManager() bool {
c.Lock()
defer c.Unlock()
if c.cfg == nil || c.cfg.Daemon.ClusterProvider == nil {
if c.cfg == nil || c.cfg.ClusterProvider == nil {
return false
}
return c.cfg.Daemon.ClusterProvider.IsManager()
return c.cfg.ClusterProvider.IsManager()
}
func (c *controller) isAgent() bool {
c.Lock()
defer c.Unlock()
if c.cfg == nil || c.cfg.Daemon.ClusterProvider == nil {
if c.cfg == nil || c.cfg.ClusterProvider == nil {
return false
}
return c.cfg.Daemon.ClusterProvider.IsAgent()
return c.cfg.ClusterProvider.IsAgent()
}
func (c *controller) isDistributedControl() bool {
@ -675,7 +675,6 @@ func (c *controller) NewNetwork(networkType, name string, id string, options ...
if network.scope == datastore.LocalScope && cap.DataScope == datastore.GlobalScope {
return nil, types.ForbiddenErrorf("cannot downgrade network scope for %s networks", networkType)
}
if network.ingress && cap.DataScope != datastore.GlobalScope {
return nil, types.ForbiddenErrorf("Ingress network can only be global scope network")
@ -1038,8 +1037,8 @@ func (c *controller) NewSandbox(containerID string, options ...SandboxOption) (S
if sb.ingress {
c.ingressSandbox = sb
sb.config.hostsPath = filepath.Join(c.cfg.Daemon.DataDir, "/network/files/hosts")
sb.config.resolvConfPath = filepath.Join(c.cfg.Daemon.DataDir, "/network/files/resolv.conf")
sb.config.hostsPath = filepath.Join(c.cfg.DataDir, "/network/files/hosts")
sb.config.resolvConfPath = filepath.Join(c.cfg.DataDir, "/network/files/resolv.conf")
sb.id = "ingress_sbox"
} else if sb.loadBalancerNID != "" {
sb.id = "lb_" + sb.loadBalancerNID
@ -1287,7 +1286,7 @@ func (c *controller) iptablesEnabled() bool {
return false
}
// parse map cfg["bridge"]["generic"]["EnableIPTable"]
cfgBridge, ok := c.cfg.Daemon.DriverCfg["bridge"].(map[string]interface{})
cfgBridge, ok := c.cfg.DriverCfg["bridge"].(map[string]interface{})
if !ok {
return false
}

View File

@ -108,7 +108,6 @@ func TestAtomicKVObjectFlatKey(t *testing.T) {
if err != nil {
t.Fatal(err)
}
}
// dummy data used to test the datastore

View File

@ -37,7 +37,6 @@ func (s *MockStore) Get(key string) (*store.KVPair, error) {
return nil, nil
}
return &store.KVPair{Value: mData.Data, LastIndex: mData.Index}, nil
}
// Put a value at "key"

View File

@ -30,7 +30,6 @@ var procGwNetwork = make(chan (bool), 1)
*/
func (sb *sandbox) setupDefaultGW() error {
// check if the container already has a GW endpoint
if ep := sb.getEndpointInGWNetwork(); ep != nil {
return nil

View File

@ -802,7 +802,6 @@ func (d *driver) createNetwork(config *networkConfiguration) (err error) {
}
func (d *driver) DeleteNetwork(nid string) error {
d.configNetwork.Lock()
defer d.configNetwork.Unlock()

View File

@ -30,7 +30,6 @@ func newLink(parentIP, childIP string, ports []types.TransportPort, bridge strin
ports: ports,
bridge: bridge,
}
}
func (l *link) Enable() error {

View File

@ -86,7 +86,7 @@ func (n *bridgeNetwork) allocatePortsInternal(bindings []types.PortBinding, cont
// validatePortBindingIPv4 validates the port binding, populates the missing Host IP field and returns true
// if this is a valid IPv4 binding, else returns false
func (n *bridgeNetwork) validatePortBindingIPv4(bnd *types.PortBinding, containerIPv4, defHostIP net.IP) bool {
//Return early if there is a valid Host IP, but its not a IPv4 address
// Return early if there is a valid Host IP, but its not a IPv4 address
if len(bnd.HostIP) > 0 && bnd.HostIP.To4() == nil {
return false
}
@ -100,7 +100,6 @@ func (n *bridgeNetwork) validatePortBindingIPv4(bnd *types.PortBinding, containe
}
bnd.IP = containerIPv4
return true
}
// validatePortBindingIPv6 validates the port binding, populates the missing Host IP field and returns true

View File

@ -390,7 +390,6 @@ func removeIPChains(version iptables.IPVersion) {
{Name: IsolationChain2, Table: iptables.Filter, IPTable: ipt},
{Name: oldIsolationChain, Table: iptables.Filter, IPTable: ipt},
} {
if err := chainInfo.Remove(); err != nil {
logrus.Warnf("Failed to remove existing iptables entries in table %s chain %s : %v", chainInfo.Table, chainInfo.Name, err)
}

View File

@ -53,7 +53,6 @@ func TestSetupIPv6(t *testing.T) {
if !found {
t.Fatalf("Bridge device does not have requested IPv6 address %v", bridgeIPv6)
}
}
func TestSetupGatewayIPv6(t *testing.T) {

View File

@ -61,7 +61,6 @@ func (d *driver) initStore(option map[string]interface{}) error {
if err != nil {
return err
}
}
return nil

View File

@ -10,11 +10,10 @@ import (
"fmt"
"hash/fnv"
"net"
"strconv"
"sync"
"syscall"
"strconv"
"github.com/docker/docker/libnetwork/drivers/overlay/overlayutils"
"github.com/docker/docker/libnetwork/iptables"
"github.com/docker/docker/libnetwork/ns"
@ -76,7 +75,6 @@ func (e *encrMap) String() string {
b.WriteString(",")
}
b.WriteString("]")
}
return b.String()
}

View File

@ -561,7 +561,6 @@ func (n *network) restoreSubnetSandbox(s *subnet, brName, vxlanName string) erro
}
func (n *network) setupSubnetSandbox(s *subnet, brName, vxlanName string) error {
if hostMode {
// Try to delete stale bridge interface if it exists
if err := deleteInterface(brName); err != nil {
@ -1076,7 +1075,7 @@ func (n *network) releaseVxlanID() ([]uint32, error) {
}
func (n *network) obtainVxlanID(s *subnet) error {
//return if the subnet already has a vxlan id assigned
// return if the subnet already has a vxlan id assigned
if n.vxlanID(s) != 0 {
return nil
}

View File

@ -179,9 +179,7 @@ func (d *driver) resolvePeer(nid string, peerIP net.IP) (net.HardwareAddr, net.I
}
}
func (d *driver) startSerfLoop(eventCh chan serf.Event, notifyCh chan ovNotify,
exitCh chan chan struct{}) {
func (d *driver) startSerfLoop(eventCh chan serf.Event, notifyCh chan ovNotify, exitCh chan chan struct{}) {
for {
select {
case notify, ok := <-notifyCh:

View File

@ -106,7 +106,6 @@ func deleteInterfaceBySubnet(brPrefix string, s *subnet) error {
}
}
return nil
}
func deleteInterface(name string) error {

View File

@ -181,7 +181,6 @@ func Fini(drv driverapi.Driver) {
}
func (d *driver) configure() error {
// Apply OS specific kernel configs if needed
d.initOS.Do(applyOStweaks)

View File

@ -168,9 +168,7 @@ func (d *driver) peerDbSearch(nid string, peerIP net.IP) (*peerKey, *peerEntry,
return pKeyMatched, pEntryMatched, nil
}
func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
peerMac net.HardwareAddr, vtep net.IP, isLocal bool) (bool, int) {
func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, peerMac net.HardwareAddr, vtep net.IP, isLocal bool) (bool, int) {
d.peerDb.Lock()
pMap, ok := d.peerDb.mp[nid]
if !ok {
@ -205,9 +203,7 @@ func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask
return b, i
}
func (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
peerMac net.HardwareAddr, vtep net.IP, isLocal bool) (bool, int) {
func (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, peerMac net.HardwareAddr, vtep net.IP, isLocal bool) (bool, int) {
d.peerDb.Lock()
pMap, ok := d.peerDb.mp[nid]
if !ok {
@ -340,9 +336,7 @@ func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
}
}
func (d *driver) peerAddOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
peerMac net.HardwareAddr, vtep net.IP, l2Miss, l3Miss, updateDB, localPeer bool) error {
func (d *driver) peerAddOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, peerMac net.HardwareAddr, vtep net.IP, l2Miss, l3Miss, updateDB, localPeer bool) error {
if err := validateID(nid, eid); err != nil {
return err
}
@ -432,9 +426,7 @@ func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMas
}
}
func (d *driver) peerDeleteOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
peerMac net.HardwareAddr, vtep net.IP, localPeer bool) error {
func (d *driver) peerDeleteOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, peerMac net.HardwareAddr, vtep net.IP, localPeer bool) error {
if err := validateID(nid, eid); err != nil {
return err
}

View File

@ -5,7 +5,7 @@ import (
"github.com/docker/docker/libnetwork/drivers/remote"
)
func getInitializers(experimental bool) []initializer {
func getInitializers() []initializer {
return []initializer{
{null.Init, "null"},
{remote.Init, "remote"},

View File

@ -10,7 +10,7 @@ import (
"github.com/docker/docker/libnetwork/drivers/remote"
)
func getInitializers(experimental bool) []initializer {
func getInitializers() []initializer {
in := []initializer{
{bridge.Init, "bridge"},
{host.Init, "host"},

View File

@ -7,7 +7,7 @@ import (
"github.com/docker/docker/libnetwork/drivers/windows/overlay"
)
func getInitializers(experimental bool) []initializer {
func getInitializers() []initializer {
return []initializer{
{null.Init, "null"},
{overlay.Init, "overlay"},

View File

@ -178,7 +178,6 @@ func (ep *endpoint) UnmarshalJSON(b []byte) (err error) {
tplist = append(tplist, tp)
}
ep.generic[netlabel.ExposedPorts] = tplist
}
}
@ -580,7 +579,6 @@ func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) (err error) {
ep.Name(), ep.ID(), err)
}
}
}
if !sb.needDefaultGW() {

View File

@ -7,7 +7,6 @@ import (
)
func TestErrorInterfaces(t *testing.T) {
badRequestErrorList := []error{ErrInvalidID(""), ErrInvalidName(""), ErrInvalidJoin{}, ErrInvalidNetworkDriver(""), InvalidContainerIDError(""), ErrNoSuchNetwork(""), ErrNoSuchEndpoint("")}
for _, err := range badRequestErrorList {
switch u := err.(type) {
@ -47,5 +46,4 @@ func TestErrorInterfaces(t *testing.T) {
t.Fatalf("Failed to detect err %v is of type ForbiddenError. Got type: %T", err, u)
}
}
}

View File

@ -209,7 +209,6 @@ func TestUpdateIgnoresPrefixedHostname(t *testing.T) {
if expected := "5.5.5.5\tprefix\n3.3.3.3\tprefixAndMore\n4.4.4.4\tunaffectedHost\n"; !bytes.Contains(content, []byte(expected)) {
t.Fatalf("Expected to find '%s' got '%s'", expected, content)
}
}
// This regression test covers the host prefix issue for the

View File

@ -52,7 +52,7 @@ func TestUserChain(t *testing.T) {
tc := tc
t.Run(fmt.Sprintf("iptables=%v,insert=%v", tc.iptables, tc.insert), func(t *testing.T) {
c := nc.(*controller)
c.cfg.Daemon.DriverCfg["bridge"] = map[string]interface{}{
c.cfg.DriverCfg["bridge"] = map[string]interface{}{
netlabel.GenericData: options.Generic{
"EnableIPTables": tc.iptables,
},

View File

@ -1056,7 +1056,6 @@ func TestOverlappingRequests(t *testing.T) {
}
func TestUnusualSubnets(t *testing.T) {
subnet := "192.168.0.2/31"
outsideTheRangeAddresses := []struct {
@ -1075,7 +1074,6 @@ func TestUnusualSubnets(t *testing.T) {
}
for _, store := range []bool{false, true} {
allocator, err := getAllocator(store)
if err != nil {
t.Fatal(err)
@ -1117,7 +1115,6 @@ func TestUnusualSubnets(t *testing.T) {
if err != ipamapi.ErrNoAvailableIPs {
t.Fatal("Did not get expected error when pool is exhausted.")
}
}
}
@ -1264,7 +1261,6 @@ func benchmarkRequest(b *testing.B, a *Allocator, subnet string) {
}
func BenchmarkRequest(b *testing.B) {
subnets := []string{
"10.0.0.0/24",
"10.0.0.0/16",

View File

@ -56,5 +56,4 @@ func TestOtherRequests(t *testing.T) {
if err == nil {
t.Fatal("Unexpected success")
}
}

View File

@ -206,7 +206,7 @@ func getDockerZoneSettings() []interface{} {
description: "zone for docker bridge network interfaces",
target: "ACCEPT",
}
slice := []interface{}{
return []interface{}{
settings.version,
settings.name,
settings.description,
@ -224,8 +224,6 @@ func getDockerZoneSettings() []interface{} {
settings.sourcePorts,
settings.icmpBlockInversion,
}
return slice
}
// setupDockerZone creates a zone called docker in firewalld which includes docker interfaces to allow

View File

@ -90,5 +90,4 @@ func TestPassthrough(t *testing.T) {
t.Fatal("rule1 does not exist")
}
}
}

View File

@ -260,7 +260,6 @@ func (iptable IPTable) ProgramChain(c *ChainInfo, bridgeName string, hairpinMode
} else if len(output) != 0 {
return fmt.Errorf("Could not delete linking rule from %s/%s: %s", c.Table, c.Name, output)
}
}
establish := []string{
"-o", bridgeName,
@ -301,7 +300,6 @@ func (iptable IPTable) RemoveExistingChain(name string, table Table) error {
// Forward adds forwarding rule to 'filter' table and corresponding nat rule to 'nat' table.
func (c *ChainInfo) Forward(action Action, ip net.IP, port int, proto, destAddr string, destPort int, bridgeName string) error {
iptable := GetIptable(c.IPTable.Version)
daddr := ip.String()
if ip.IsUnspecified() {

View File

@ -334,7 +334,6 @@ func TestAuxAddresses(t *testing.T) {
}
for _, i := range input {
n.ipamV4Config = []*IpamConf{{PreferredPool: i.masterPool, SubPool: i.subPool, AuxAddresses: i.auxAddresses}}
err = n.ipamAllocate()

View File

@ -400,7 +400,6 @@ func TestNetworkConfig(t *testing.T) {
if err := configNetwork.Delete(); err != nil {
t.Fatal(err)
}
}
func TestUnknownNetwork(t *testing.T) {
@ -1054,7 +1053,6 @@ func TestEndpointMultipleJoins(t *testing.T) {
if _, ok := err.(types.ForbiddenError); !ok {
t.Fatalf("Failed with unexpected error type: %T. Desc: %s", err, err.Error())
}
}
func TestLeaveAll(t *testing.T) {

View File

@ -1168,7 +1168,6 @@ func (n *network) CreateEndpoint(name string, options ...EndpointOption) (Endpoi
defer n.ctrlr.networkLocker.Unlock(n.id) //nolint:errcheck
return n.createEndpoint(name, options...)
}
func (n *network) createEndpoint(name string, options ...EndpointOption) (Endpoint, error) {
@ -1953,7 +1952,6 @@ func (n *network) TableEventRegister(tableName string, objType driverapi.ObjectT
}
func (n *network) UpdateIpamConfig(ipV4Data []driverapi.IPAMData) {
ipamV4Config := make([]*IpamConf, len(ipV4Data))
for i, data := range ipV4Data {
@ -2233,7 +2231,6 @@ func (n *network) deleteLoadBalancerSandbox() error {
if err != nil {
logrus.Warnf("Failed to find load balancer endpoint %s on network %s: %v", endpointName, name, err)
} else {
info := endpoint.Info()
if info != nil {
sb := info.Sandbox()

View File

@ -200,7 +200,6 @@ func (nDB *NetworkDB) retryJoin(ctx context.Context, members []string) {
return
}
}
}
func (nDB *NetworkDB) clusterJoin(members []string) error {

View File

@ -177,7 +177,6 @@ func GenerateKey(containerID string) string {
index = tmpindex
tmpkey = id
}
}
}
containerID = tmpkey

View File

@ -183,7 +183,6 @@ func (n *networkNamespace) AddStaticRoute(r *types.StaticRoute) error {
}
func (n *networkNamespace) RemoveStaticRoute(r *types.StaticRoute) error {
err := n.removeRoute(n.nsPath(), r.Destination, r.NextHop)
if err == nil {
n.Lock()

View File

@ -321,7 +321,6 @@ func TestSetInterfaceIP(t *testing.T) {
}
func TestLiveRestore(t *testing.T) {
defer testutils.SetupTestOSContext(t)()
key, err := newKey(t)

View File

@ -347,7 +347,6 @@ func (r *resolver) handleSRVQuery(query *dns.Msg) (*dns.Msg, error) {
resp.Extra = append(resp.Extra, rr1)
}
return resp, nil
}
func truncateResp(resp *dns.Msg, maxSize int, isTCP bool) {

View File

@ -166,7 +166,6 @@ func TestDNSIPQuery(t *testing.T) {
t.Log("Response: ", resp.String())
checkDNSResponseCode(t, resp, dns.RcodeServerFailure)
w.ClearResponse()
}
func newDNSHandlerServFailOnce(requests *int) func(w dns.ResponseWriter, r *dns.Msg) {

View File

@ -569,7 +569,6 @@ func (sb *sandbox) ResolveName(name string, ipType int) ([]net.IP, bool) {
}
for i := 0; i < len(reqName); i++ {
// First check for local container alias
ip, ipv6Miss := sb.resolveName(reqName[i], networkName[i], epList, true, ipType)
if ip != nil {

View File

@ -209,9 +209,7 @@ func (sb *sandbox) setupDNS() error {
// When the user specify a conainter in the host namespace and do no have any dns option specified
// we just copy the host resolv.conf from the host itself
if sb.config.useDefaultSandBox &&
len(sb.config.dnsList) == 0 && len(sb.config.dnsSearchList) == 0 && len(sb.config.dnsOptionsList) == 0 {
if sb.config.useDefaultSandBox && len(sb.config.dnsList) == 0 && len(sb.config.dnsSearchList) == 0 && len(sb.config.dnsOptionsList) == 0 {
// We are working under the assumption that the origin file option had been properly expressed by the upper layer
// if not here we are going to error out
if err := copyFile(sb.config.originResolvConfPath, sb.config.resolvConfPath); err != nil {
@ -325,7 +323,7 @@ func (sb *sandbox) updateDNS(ipv6Enabled bool) error {
if currHash != "" && currHash != currRC.Hash {
// Seems the user has changed the container resolv.conf since the last time
// we checked so return without doing anything.
//logrus.Infof("Skipping update of resolv.conf file with ipv6Enabled: %t because file was touched by user", ipv6Enabled)
// logrus.Infof("Skipping update of resolv.conf file with ipv6Enabled: %t because file was touched by user", ipv6Enabled)
return nil
}

View File

@ -114,7 +114,7 @@ func processReturn(r io.Reader) error {
func (c *controller) startExternalKeyListener() error {
execRoot := defaultExecRoot
if v := c.Config().Daemon.ExecRoot; v != "" {
if v := c.Config().ExecRoot; v != "" {
execRoot = v
}
udsBase := filepath.Join(execRoot, execSubdir)

View File

@ -213,7 +213,6 @@ func (c *controller) cleanupServiceBindings(cleanupNID string) {
for _, f := range cleanupFuncs {
f()
}
}
func makeServiceCleanupFunc(c *controller, s *service, nID, eID string, vip net.IP, ip net.IP) func() {
@ -315,7 +314,6 @@ func (c *controller) addServiceBinding(svcName, svcID, nID, eID, containerName s
}
func (c *controller) rmServiceBinding(svcName, svcID, nID, eID, containerName string, vip net.IP, ingressPorts []*PortConfig, serviceAliases []string, taskAliases []string, ip net.IP, method string, deleteSvcRecords bool, fullRemove bool) error {
var rmService bool
skey := serviceKey{

View File

@ -319,7 +319,6 @@ func (c *controller) networkWatchLoop(nw *netWatch, ep *endpoint, ecCh <-chan da
for _, lEp := range delEpMap {
ep.getNetwork().updateSvcRecord(lEp, c.getLocalEps(nw), false)
}
for _, lEp := range addEp {
ep.getNetwork().updateSvcRecord(lEp, c.getLocalEps(nw), true)

View File

@ -14,7 +14,6 @@ func TestBoltdbBackend(t *testing.T) {
defer os.Remove("/tmp/boltdb.db")
config := &store.Config{Bucket: "testBackend"}
testLocalBackend(t, "boltdb", "/tmp/boltdb.db", config)
}
func TestNoPersist(t *testing.T) {

View File

@ -1,34 +0,0 @@
# LibNetwork Integration Tests
Integration tests provide end-to-end testing of LibNetwork and Drivers.
While unit tests verify the code is working as expected by relying on mocks and
artificially created fixtures, integration tests actually use real docker
engines and communicate to it through the CLI.
Note that integration tests do **not** replace unit tests and Docker is used as a good use-case.
As a rule of thumb, code should be tested thoroughly with unit tests.
Integration tests on the other hand are meant to test a specific feature end to end.
Integration tests are written in *bash* using the
[bats](https://github.com/sstephenson/bats) framework.
## Pre-Requisites
1. Bats (https://github.com/sstephenson/bats#installing-bats-from-source)
2. Docker Machine (https://github.com/docker/machine)
3. Virtualbox (as a Docker machine driver)
## Running integration tests
* Start by [installing](https://github.com/sstephenson/bats#installing-bats-from-source) *bats* on your system.
* If not done already, [install](https://docs.docker.com/machine/) *docker-machine* into /usr/bin
* Make sure Virtualbox is installed as well, which will be used by docker-machine as a driver to launch VMs
In order to run all integration tests, pass *bats* the test path:
```
$ bats test/integration/daemon-configs.bats
```

View File

@ -1,104 +0,0 @@
#!/usr/bin/env bats
load helpers
export DRIVER=virtualbox
export NAME="bats-$DRIVER-daemon-configs"
export MACHINE_STORAGE_PATH=/tmp/machine-bats-daemon-test-$DRIVER
# Default memsize is 1024MB and disksize is 20000MB
# These values are defined in drivers/virtualbox/virtualbox.go
export DEFAULT_MEMSIZE=1024
export DEFAULT_DISKSIZE=20000
export CUSTOM_MEMSIZE=1536
export CUSTOM_DISKSIZE=10000
export CUSTOM_CPUCOUNT=1
export BAD_URL="http://dev.null:9111/bad.iso"
function setup() {
# add sleep because vbox; ugh
sleep 1
}
findDiskSize() {
    # Print the capacity (in MB) of the machine's root disk VMDK.
    # SATA-0-0 is usually the boot2docker.iso image.
    # We assume that SATA 1-0 is the root disk VMDK and grab its UUID,
    # e.g. "SATA-ImageUUID-1-0"="fb5f33a7-e4e3-4cb9-877c-f9415ae2adea"
    # TODO(slashk): does this work on Windows ?
    run bash -c "VBoxManage showvminfo --machinereadable $NAME | grep SATA-ImageUUID-1-0 | cut -d'=' -f2"
    # Single-quote the grep pattern and escape awk's \$2: in the original,
    # the nested double quotes ended the outer string early and the outer
    # shell expanded $2 (empty), so awk printed the whole line instead of
    # just the capacity field.
    run bash -c "VBoxManage showhdinfo $output | grep 'Capacity:' | awk -F' ' '{ print \$2 }'"
}
findMemorySize() {
run bash -c "VBoxManage showvminfo --machinereadable $NAME | grep memory= | cut -d'=' -f2"
}
findCPUCount() {
run bash -c "VBoxManage showvminfo --machinereadable $NAME | grep cpus= | cut -d'=' -f2"
}
buildMachineWithOldIsoCheckUpgrade() {
run wget https://github.com/boot2docker/boot2docker/releases/download/v1.4.1/boot2docker.iso -O $MACHINE_STORAGE_PATH/cache/boot2docker.iso
run machine create -d virtualbox $NAME
run machine upgrade $NAME
}
@test "$DRIVER: machine should not exist" {
run machine active $NAME
[ "$status" -eq 1 ]
}
@test "$DRIVER: VM should not exist" {
run VBoxManage showvminfo $NAME
[ "$status" -eq 1 ]
}
@test "$DRIVER: create" {
run machine create -d $DRIVER $NAME
[ "$status" -eq 0 ]
}
@test "$DRIVER: active" {
run machine active $NAME
[ "$status" -eq 0 ]
}
@test "$DRIVER: check default machine memory size" {
findMemorySize
[[ ${output} == "${DEFAULT_MEMSIZE}" ]]
}
@test "$DRIVER: check default machine disksize" {
findDiskSize
[[ ${output} == *"$DEFAULT_DISKSIZE"* ]]
}
@test "$DRIVER: test bridge-ip" {
run machine ssh $NAME sudo /etc/init.d/docker stop
run machine ssh $NAME sudo ifconfig docker0 down
run machine ssh $NAME sudo ip link delete docker0
BIP='--bip=172.168.45.1/24'
set_extra_config $BIP
cat ${TMP_EXTRA_ARGS_FILE} | machine ssh $NAME sudo tee /var/lib/boot2docker/profile
cat ${DAEMON_CFG_FILE} | machine ssh $NAME "sudo tee -a /var/lib/boot2docker/profile"
run machine ssh $NAME sudo /etc/init.d/docker start
run machine ssh $NAME ifconfig docker0
[ "$status" -eq 0 ]
[[ ${lines[1]} =~ "172.168.45.1" ]]
}
@test "$DRIVER: run busybox container" {
run machine ssh $NAME sudo cat /var/lib/boot2docker/profile
run docker $(machine config $NAME) run busybox echo hello world
[ "$status" -eq 0 ]
}
@test "$DRIVER: remove machine" {
run machine rm -f $NAME
}
# Cleanup of machine store should always be the last 'test'
@test "$DRIVER: cleanup" {
run rm -rf $MACHINE_STORAGE_PATH
[ "$status" -eq 0 ]
}

View File

@ -1,4 +0,0 @@
CACERT=/var/lib/boot2docker/ca.pem
SERVERCERT=/var/lib/boot2docker/server-key.pem
SERVERKEY=/var/lib/boot2docker/server.pem
DOCKER_TLS=no

View File

@ -1,287 +0,0 @@
# -*- mode: sh -*-
#!/usr/bin/env bats
load helpers
function test_single_network_connectivity() {
local nw_name start end
nw_name=${1}
start=1
end=${2}
# Create containers and connect them to the network
for i in `seq ${start} ${end}`;
do
dnet_cmd $(inst_id2port 1) container create container_${i}
net_connect 1 container_${i} ${nw_name}
done
# Now test connectivity between all the containers using service names
for i in `seq ${start} ${end}`;
do
if [ "${nw_name}" != "internal" ]; then
runc $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_${i}) \
"ping -c 1 www.google.com"
fi
for j in `seq ${start} ${end}`;
do
if [ "$i" -eq "$j" ]; then
continue
fi
runc $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_${i}) \
"ping -c 1 container_${j}"
done
done
if [ -n "$3" ]; then
return
fi
# Teardown the container connections and the network
for i in `seq ${start} ${end}`;
do
net_disconnect 1 container_${i} ${nw_name}
dnet_cmd $(inst_id2port 1) container rm container_${i}
done
}
@test "Test default bridge network" {
echo $(docker ps)
test_single_network_connectivity bridge 3
}
@test "Test default network dnet restart" {
echo $(docker ps)
for iter in `seq 1 2`;
do
test_single_network_connectivity bridge 3
if [ "$iter" -eq 1 ]; then
docker restart dnet-1-bridge
wait_for_dnet $(inst_id2port 1) dnet-1-bridge
fi
done
}
@test "Test default network dnet ungraceful restart" {
echo $(docker ps)
for iter in `seq 1 2`;
do
if [ "$iter" -eq 1 ]; then
test_single_network_connectivity bridge 3 skip
docker restart dnet-1-bridge
wait_for_dnet $(inst_id2port 1) dnet-1-bridge
else
test_single_network_connectivity bridge 3
fi
done
}
@test "Test bridge network" {
echo $(docker ps)
dnet_cmd $(inst_id2port 1) network create -d bridge singlehost
test_single_network_connectivity singlehost 3
dnet_cmd $(inst_id2port 1) network rm singlehost
}
@test "Test bridge network dnet restart" {
echo $(docker ps)
dnet_cmd $(inst_id2port 1) network create -d bridge singlehost
for iter in `seq 1 2`;
do
test_single_network_connectivity singlehost 3
if [ "$iter" -eq 1 ]; then
docker restart dnet-1-bridge
wait_for_dnet $(inst_id2port 1) dnet-1-bridge
fi
done
dnet_cmd $(inst_id2port 1) network rm singlehost
}
@test "Test bridge network dnet ungraceful restart" {
echo $(docker ps)
dnet_cmd $(inst_id2port 1) network create -d bridge singlehost
for iter in `seq 1 2`;
do
if [ "$iter" -eq 1 ]; then
test_single_network_connectivity singlehost 3 skip
docker restart dnet-1-bridge
wait_for_dnet $(inst_id2port 1) dnet-1-bridge
else
test_single_network_connectivity singlehost 3
fi
done
dnet_cmd $(inst_id2port 1) network rm singlehost
}
@test "Test multiple bridge networks" {
echo $(docker ps)
start=1
end=3
for i in `seq ${start} ${end}`;
do
dnet_cmd $(inst_id2port 1) container create container_${i}
for j in `seq ${start} ${end}`;
do
if [ "$i" -eq "$j" ]; then
continue
fi
if [ "$i" -lt "$j" ]; then
dnet_cmd $(inst_id2port 1) network create -d bridge sh${i}${j}
nw=sh${i}${j}
else
nw=sh${j}${i}
fi
osvc="svc${i}${j}"
dnet_cmd $(inst_id2port 1) service publish ${osvc}.${nw}
dnet_cmd $(inst_id2port 1) service attach container_${i} ${osvc}.${nw}
done
done
for i in `seq ${start} ${end}`;
do
echo ${i1}
for j in `seq ${start} ${end}`;
do
echo ${j1}
if [ "$i" -eq "$j" ]; then
continue
fi
osvc="svc${j}${i}"
echo "pinging ${osvc}"
dnet_cmd $(inst_id2port 1) service ls
runc $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_${i}) "cat /etc/hosts"
runc $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_${i}) "ping -c 1 ${osvc}"
done
done
svcs=(
0,0
2,3
1,3
1,2
)
echo "Test connectivity failure"
for i in `seq ${start} ${end}`;
do
IFS=, read a b <<<"${svcs[$i]}"
osvc="svc${a}${b}"
echo "pinging ${osvc}"
runc_nofail $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_${i}) "ping -c 1 ${osvc}"
[ "${status}" -ne 0 ]
done
for i in `seq ${start} ${end}`;
do
for j in `seq ${start} ${end}`;
do
if [ "$i" -eq "$j" ]; then
continue
fi
if [ "$i" -lt "$j" ]; then
nw=sh${i}${j}
else
nw=sh${j}${i}
fi
osvc="svc${i}${j}"
dnet_cmd $(inst_id2port 1) service detach container_${i} ${osvc}.${nw}
dnet_cmd $(inst_id2port 1) service unpublish ${osvc}.${nw}
done
dnet_cmd $(inst_id2port 1) container rm container_${i}
done
for i in `seq ${start} ${end}`;
do
for j in `seq ${start} ${end}`;
do
if [ "$i" -eq "$j" ]; then
continue
fi
if [ "$i" -lt "$j" ]; then
dnet_cmd $(inst_id2port 1) network rm sh${i}${j}
fi
done
done
}
@test "Test bridge network alias support" {
dnet_cmd $(inst_id2port 1) network create -d bridge br1
dnet_cmd $(inst_id2port 1) container create container_1
net_connect 1 container_1 br1 container_2:c2
dnet_cmd $(inst_id2port 1) container create container_2
net_connect 1 container_2 br1
runc $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_1) "ping -c 1 container_2"
runc $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_1) "ping -c 1 c2"
net_disconnect 1 container_1 br1
net_disconnect 1 container_2 br1
dnet_cmd $(inst_id2port 1) container rm container_1
dnet_cmd $(inst_id2port 1) container rm container_2
dnet_cmd $(inst_id2port 1) network rm br1
}
@test "Test bridge network global alias support" {
dnet_cmd $(inst_id2port 1) network create -d bridge br1
dnet_cmd $(inst_id2port 1) network create -d bridge br2
dnet_cmd $(inst_id2port 1) container create container_1
net_connect 1 container_1 br1 : c1
dnet_cmd $(inst_id2port 1) container create container_2
net_connect 1 container_2 br1 : shared
dnet_cmd $(inst_id2port 1) container create container_3
net_connect 1 container_3 br1 : shared
runc $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_2) "ping -c 1 container_1"
runc $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_2) "ping -c 1 c1"
runc $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_1) "ping -c 1 container_2"
runc $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_1) "ping -c 1 shared"
net_disconnect 1 container_2 br1
dnet_cmd $(inst_id2port 1) container rm container_2
runc $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_1) "ping -c 1 container_3"
runc $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_1) "ping -c 1 shared"
net_disconnect 1 container_1 br1
dnet_cmd $(inst_id2port 1) container rm container_1
net_disconnect 1 container_3 br1
dnet_cmd $(inst_id2port 1) container rm container_3
dnet_cmd $(inst_id2port 1) network rm br1
}
@test "Test bridge network internal network" {
echo $(docker ps)
dnet_cmd $(inst_id2port 1) network create -d bridge --internal internal
dnet_cmd $(inst_id2port 1) container create container_1
# connects to internal network, confirm it can't communicate with outside world
net_connect 1 container_1 internal
run runc $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_1) "ping -c 1 8.8.8.8"
[[ "$output" == *"1 packets transmitted, 0 packets received, 100% packet loss"* ]]
net_disconnect 1 container_1 internal
# connects to bridge network, confirm it can communicate with outside world
net_connect 1 container_1 bridge
runc $(dnet_container_name 1 bridge) $(get_sbox_id 1 container_1) "ping -c 1 8.8.8.8"
net_disconnect 1 container_1 bridge
dnet_cmd $(inst_id2port 1) container rm container_1
# test communications within internal network
test_single_network_connectivity internal 3
dnet_cmd $(inst_id2port 1) network rm internal
}

View File

@ -1,31 +0,0 @@
#!/usr/bin/env bats
load helpers
@test "Test dnet custom port" {
start_dnet 1 a 4567
dnet_cmd 4567 network ls
stop_dnet 1 a
}
@test "Test dnet invalid custom port" {
start_dnet 1 b 4567
run dnet_cmd 4568 network ls
echo ${output}
[ "$status" -ne 0 ]
stop_dnet 1 b
}
@test "Test dnet invalid params" {
start_dnet 1 c
run dnet_cmd 8080 network ls
echo ${output}
[ "$status" -ne 0 ]
run ./bin/dnet -H=unix://var/run/dnet.sock network ls
echo ${output}
[ "$status" -ne 0 ]
run ./bin/dnet -H= -l=invalid network ls
echo ${output}
[ "$status" -ne 0 ]
stop_dnet 1 c
}

View File

@ -1,478 +0,0 @@
function get_docker_bridge_ip() {
    # The default gateway seen from inside a throwaway busybox container is
    # the docker0 bridge address on the host.
    local gw
    gw=$(docker run --rm -it busybox ip route show | grep default | cut -d" " -f3)
    echo ${gw}
}
function inst_id2port() {
    # Map a 1-based dnet instance number to its host port (41000, 41001, ...).
    local inst=${1}
    echo $((inst + 40999))
}
function dnet_container_name() {
    # Container name for dnet instance $1 with store suffix $2,
    # e.g. "dnet-1-bridge".
    printf 'dnet-%s-%s\n' "${1}" "${2}"
}
function dnet_container_ip() {
    # Print the bridge-network IP address of container dnet-$1-$2.
    local cname="dnet-$1-$2"
    docker inspect --format '{{.NetworkSettings.IPAddress}}' ${cname}
}
function get_sbox_id() {
    # Look up the sandbox ID for container $2 on dnet instance $1: it is the
    # fifth column of the matching `service ls` row.
    local row
    row=$(dnet_cmd $(inst_id2port ${1}) service ls | grep ${2})
    echo ${row} | cut -d" " -f5
}
function net_connect() {
    # Publish service ${2}.${3} and attach container ${2} to it on dnet
    # instance ${1}.
    #   $4 (optional): service alias; pass ":" to skip it while still
    #                  supplying $5.
    #   $5 (optional): global alias applied at publish time.
    local al gl
    if [ -n "$4" ]; then
        if [ "${4}" != ":" ]; then
            al="--alias=${4}"
        fi
    fi
    if [ -n "$5" ]; then
        gl="--alias=${5}"
    fi
    # $gl and $al are intentionally unquoted so an unset alias expands to
    # nothing instead of an empty argument.
    dnet_cmd $(inst_id2port ${1}) service publish $gl ${2}.${3}
    dnet_cmd $(inst_id2port ${1}) service attach $al ${2} ${2}.${3}
}
function net_disconnect() {
    # Reverse of net_connect: detach container ${2} from service ${2}.${3}
    # and unpublish the service on dnet instance ${1}.
    local port
    port=$(inst_id2port ${1})
    dnet_cmd ${port} service detach ${2} ${2}.${3}
    dnet_cmd ${port} service unpublish ${2}.${3}
}
hrun() {
    # bats-style `run` replacement: execute "$@" capturing combined
    # stdout+stderr into the global $output, the exit code into $status and
    # the split output into the $lines array, without letting a failing
    # command abort a `set -e` shell.
    local e E T oldIFS
    # Remember which of -e/-E/-T are currently active so they can be
    # restored after the command runs.
    [[ ! "$-" =~ e ]] || e=1
    [[ ! "$-" =~ E ]] || E=1
    [[ ! "$-" =~ T ]] || T=1
    set +e
    set +E
    set +T
    output="$("$@" 2>&1)"
    status="$?"
    oldIFS=$IFS
    # Split the captured output on newlines only.
    IFS=$'\n' lines=($output)
    [ -z "$e" ] || set -e
    [ -z "$E" ] || set -E
    [ -z "$T" ] || set -T
    IFS=$oldIFS
}
function wait_for_dnet() {
    # Poll the dnet instance listening on host port $1 until `network ls`
    # succeeds, trying once per second for up to 10 seconds. $2 is the
    # container name, used only to dump logs on an apparent crash.
    # NOTE(review): gives up silently after 10 attempts without failing;
    # callers only discover the problem on their next dnet_cmd.
    local hport
    hport=$1
    echo "waiting on dnet to come up ..."
    for i in `seq 1 10`;
    do
        hrun ./bin/dnet -H tcp://127.0.0.1:${hport} network ls
        echo ${output}
        if [ "$status" -eq 0 ]; then
            return
        fi
        # An EOF in the response usually means the daemon died; dump its logs.
        if [[ "${lines[1]}" =~ .*EOF.* ]]
        then
            docker logs ${2}
        fi
        echo "still waiting after ${i} seconds"
        sleep 1
    done
}
function parse_discovery_str() {
    # Split a discovery URL of the form "provider://host:port" and echo
    # "<url> <provider> <host:port>" for callers to `read` back.
    # Fixes: drop the unused local `d`, and make `discovery` local so the
    # helper no longer leaks a global variable.
    local discovery provider address
    discovery=$1
    provider=$(echo ${discovery} | cut -d":" -f1)
    address=$(echo ${discovery} | cut -d":" -f2):$(echo ${discovery} | cut -d":" -f3)
    # Strip the leading "//" left over from the URL scheme separator.
    address=${address:2}
    echo "${discovery} ${provider} ${address}"
}
function start_dnet() {
local inst suffix name hport cport hopt store bridge_ip labels tomlfile nip
local discovery provider address
inst=$1
shift
suffix=$1
shift
store=$(echo $suffix | cut -d":" -f1)
nip=$(echo $suffix | cut -s -d":" -f2)
stop_dnet ${inst} ${store}
name=$(dnet_container_name ${inst} ${store})
hport=$((41000+${inst}-1))
cport=2385
hopt=""
while [ -n "$1" ]
do
if [[ "$1" =~ ^[0-9]+$ ]]
then
hport=$1
cport=$1
hopt="-H tcp://0.0.0.0:${cport}"
else
store=$1
fi
shift
done
bridge_ip=$(get_docker_bridge_ip)
echo "start_dnet parsed values: " ${inst} ${suffix} ${name} ${hport} ${cport} ${hopt} ${store}
mkdir -p /tmp/dnet/${name}
tomlfile="/tmp/dnet/${name}/libnetwork.toml"
# Try discovery URLs with or without path
neigh_ip=""
neighbors=""
if [ "$nip" != "" ]; then
neighbors=${nip}
fi
discovery=""
provider=""
address=""
if [ "$discovery" != "" ]; then
cat > ${tomlfile} <<EOF
title = "LibNetwork Configuration file for ${name}"
[daemon]
debug = false
[cluster]
discovery = "${discovery}"
Heartbeat = 10
[scopes]
[scopes.global]
[scopes.global.client]
provider = "${provider}"
address = "${address}"
EOF
else
cat > ${tomlfile} <<EOF
title = "LibNetwork Configuration file for ${name}"
[daemon]
debug = false
[orchestration]
agent = true
bind = "eth0"
peer = "${neighbors}"
EOF
fi
cat ${tomlfile}
docker run \
-d \
--hostname=$(echo ${name} | sed s/_/-/g) \
--name=${name} \
--privileged \
-p ${hport}:${cport} \
-e _OVERLAY_HOST_MODE \
-v $(pwd)/:/go/src/github.com/docker/libnetwork \
-v /tmp:/tmp \
-v $(pwd)/${TMPC_ROOT}:/scratch \
-v /usr/local/bin/runc:/usr/local/bin/runc \
-w /go/src/github.com/docker/libnetwork \
mrjana/golang ./bin/dnet -d -D ${hopt} -c ${tomlfile}
wait_for_dnet $(inst_id2port ${inst}) ${name}
}
function start_ovrouter() {
    # Launch the overlay router helper as a detached container named ${1},
    # sharing the network namespace and volumes of container ${2}.
    local cname=${1}
    local parent=${2}
    docker run \
        -d \
        --name=${cname} \
        --net=container:${parent} \
        --volumes-from ${parent} \
        -w /go/src/github.com/docker/libnetwork \
        mrjana/golang ./cmd/ovrouter/ovrouter eth0
}
function stop_dnet() {
    # Tear down the dnet container for instance $1 / store $2 and delete its
    # scratch directory. Both steps are best-effort and tolerate absence.
    local cname
    cname=$(dnet_container_name $1 $2)
    rm -rf /tmp/dnet/${cname} || true
    docker rm -f ${cname} || true
}
function dnet_cmd() {
    # Run a dnet CLI command against the instance listening on host port $1;
    # all remaining arguments are passed through to dnet.
    local hport
    hport=$1
    shift
    # "$@" preserves each argument as-is; the original $* re-split any
    # argument containing whitespace.
    ./bin/dnet -H tcp://127.0.0.1:${hport} "$@"
}
function dnet_exec() {
    # Run command $2 inside dnet container $1. SIGHUP is trapped (and merely
    # echoed) so the command survives terminal hangups from `docker exec -it`.
    docker exec -it ${1} bash -c "trap \"echo SIGHUP\" SIGHUP; $2"
}
function runc() {
    # runc <dnet-container> <sandbox-id> <command>
    # Run <command> inside the network namespace of <sandbox-id>, chrooted
    # into the busybox scratch rootfs, all within <dnet-container>.
    # Fails (via dnet_exec's exit status) if the command fails.
    local dnet
    dnet=${1}
    shift
    # After the shift: ${1} is the sandbox id, ${2} is the command to run.
    # Copy the sandbox's generated etc files (resolv.conf, hosts, ...) into
    # the scratch rootfs so name resolution works inside the chroot.
    dnet_exec ${dnet} "cp /var/lib/docker/network/files/${1}*/* /scratch/rootfs/etc"
    dnet_exec ${dnet} "mkdir -p /var/run/netns"
    # Bind-mount the sandbox netns to a named netns "c" so `ip netns exec`
    # can enter it.
    dnet_exec ${dnet} "touch /var/run/netns/c && mount -o bind /var/run/docker/netns/${1} /var/run/netns/c"
    dnet_exec ${dnet} "ip netns exec c unshare -fmuip --mount-proc chroot \"/scratch/rootfs\" /bin/sh -c \"/bin/mount -t proc proc /proc && ${2}\""
    # Clean up the named-netns bind mount.
    dnet_exec ${dnet} "umount /var/run/netns/c && rm /var/run/netns/c"
}
function runc_nofail() {
    # Same as runc, but tolerates failure of the command: the exit code is
    # recorded in the global $status instead of aborting the harness.
    local dnet
    dnet=${1}
    shift
    # After the shift: ${1} is the sandbox id, ${2} is the command to run.
    dnet_exec ${dnet} "cp /var/lib/docker/network/files/${1}*/* /scratch/rootfs/etc"
    dnet_exec ${dnet} "mkdir -p /var/run/netns"
    dnet_exec ${dnet} "touch /var/run/netns/c && mount -o bind /var/run/docker/netns/${1} /var/run/netns/c"
    # Temporarily disable errexit so a non-zero exit from the probed command
    # does not kill the test run; capture it for the caller instead.
    set +e
    dnet_exec ${dnet} "ip netns exec c unshare -fmuip --mount-proc chroot \"/scratch/rootfs\" /bin/sh -c \"/bin/mount -t proc proc /proc && ${2}\""
    status="$?"
    set -e
    dnet_exec ${dnet} "umount /var/run/netns/c && rm /var/run/netns/c"
}
function test_overlay() {
dnet_suffix=$1
echo $(docker ps)
start=1
end=3
# Setup overlay network and connect containers to it
if [ -z "${2}" -o "${2}" != "skip_add" ]; then
if [ -z "${2}" -o "${2}" != "internal" ]; then
dnet_cmd $(inst_id2port 1) network create -d overlay multihost
else
dnet_cmd $(inst_id2port 1) network create -d overlay --internal multihost
fi
fi
for i in `seq ${start} ${end}`;
do
dnet_cmd $(inst_id2port $i) container create container_${i}
net_connect ${i} container_${i} multihost
done
# Now test connectivity between all the containers using service names
for i in `seq ${start} ${end}`;
do
if [ -z "${2}" -o "${2}" != "internal" ]; then
runc $(dnet_container_name $i $dnet_suffix) $(get_sbox_id ${i} container_${i}) \
"ping -c 1 www.google.com"
else
default_route=`runc $(dnet_container_name $i $dnet_suffix) $(get_sbox_id ${i} container_${i}) "ip route | grep default"`
[ "$default_route" = "" ]
fi
for j in `seq ${start} ${end}`;
do
if [ "$i" -eq "$j" ]; then
continue
fi
runc $(dnet_container_name $i $dnet_suffix) $(get_sbox_id ${i} container_${i}) \
"ping -c 1 container_$j"
done
done
# Setup bridge network and connect containers to it
if [ -z "${2}" -o "${2}" != "skip_add" ]; then
if [ -z "${2}" -o "${2}" != "internal" ]; then
dnet_cmd $(inst_id2port 1) network create -d bridge br1
dnet_cmd $(inst_id2port 1) network create -d bridge br2
net_connect ${start} container_${start} br1
net_connect ${start} container_${start} br2
# Make sure external connectivity works
runc $(dnet_container_name ${start} $dnet_suffix) $(get_sbox_id ${start} container_${start}) \
"ping -c 1 www.google.com"
net_disconnect ${start} container_${start} br1
net_disconnect ${start} container_${start} br2
# Make sure external connectivity works
runc $(dnet_container_name ${start} $dnet_suffix) $(get_sbox_id ${start} container_${start}) \
"ping -c 1 www.google.com"
dnet_cmd $(inst_id2port 1) network rm br1
dnet_cmd $(inst_id2port 1) network rm br2
# Disconnect from overlay network
net_disconnect ${start} container_${start} multihost
# Connect to overlay network again
net_connect ${start} container_${start} multihost
# Make sure external connectivity still works
runc $(dnet_container_name ${start} $dnet_suffix) $(get_sbox_id ${start} container_${start}) \
"ping -c 1 www.google.com"
fi
fi
# Teardown the container connections and the network
for i in `seq ${start} ${end}`;
do
net_disconnect ${i} container_${i} multihost
dnet_cmd $(inst_id2port $i) container rm container_${i}
done
if [ -z "${2}" -o "${2}" != "skip_rm" ]; then
dnet_cmd $(inst_id2port 2) network rm multihost
fi
}
function check_etchosts() {
    # check_etchosts <dnet-container> <sandbox-id> <name>...
    # Echo "true" when every <name> appears in /etc/hosts inside the given
    # sandbox, "false" when one is missing, or the failing command's output
    # when reading /etc/hosts itself fails.
    local dnet sbid retval
    dnet=${1}
    shift
    sbid=${1}
    shift
    retval="true"
    # "$@" preserves each remaining argument intact; the original $* would
    # have re-split arguments containing whitespace.
    for i in "$@";
    do
        run runc ${dnet} ${sbid} "cat /etc/hosts"
        if [ "$status" -ne 0 ]; then
            retval="${output}"
            break
        fi
        line=$(echo ${output} | grep ${i})
        if [ "${line}" == "" ]; then
            retval="false"
        fi
    done
    echo ${retval}
}
function test_overlay_singlehost() {
dnet_suffix=$1
shift
echo $(docker ps)
start=1
end=3
# Setup overlay network and connect containers to it
dnet_cmd $(inst_id2port 1) network create -d overlay multihost
for i in `seq ${start} ${end}`;
do
dnet_cmd $(inst_id2port 1) container create container_${i}
net_connect 1 container_${i} multihost
done
# Now test connectivity between all the containers using service names
for i in `seq ${start} ${end}`;
do
for j in `seq ${start} ${end}`;
do
if [ "$i" -eq "$j" ]; then
continue
fi
runc $(dnet_container_name 1 $dnet_suffix) $(get_sbox_id 1 container_${i}) \
"ping -c 1 container_$j"
done
done
# Teardown the container connections and the network
for i in `seq ${start} ${end}`;
do
net_disconnect 1 container_${i} multihost
dnet_cmd $(inst_id2port 1) container rm container_${i}
done
dnet_cmd $(inst_id2port 1) network rm multihost
}
function test_overlay_hostmode() {
dnet_suffix=$1
shift
echo $(docker ps)
start=1
end=2
# Setup overlay network and connect containers to it
dnet_cmd $(inst_id2port 1) network create -d overlay multihost1
dnet_cmd $(inst_id2port 1) network create -d overlay multihost2
dnet_cmd $(inst_id2port 1) network ls
for i in `seq ${start} ${end}`;
do
dnet_cmd $(inst_id2port 1) container create mh1_${i}
net_connect 1 mh1_${i} multihost1
done
for i in `seq ${start} ${end}`;
do
dnet_cmd $(inst_id2port 1) container create mh2_${i}
net_connect 1 mh2_${i} multihost2
done
# Now test connectivity between all the containers using service names
for i in `seq ${start} ${end}`;
do
for j in `seq ${start} ${end}`;
do
if [ "$i" -eq "$j" ]; then
continue
fi
# Find the IP addresses of the j containers on both networks
hrun runc $(dnet_container_name 1 $dnet_suffix) $(get_sbox_id 1 mh1_${i}) "nslookup mh1_$j"
mh1_j_ip=$(echo ${output} | awk '{print $11}')
hrun runc $(dnet_container_name 1 $dnet_suffix) $(get_sbox_id 1 mh2_${i}) "nslookup mh2_$j"
mh2_j_ip=$(echo ${output} | awk '{print $11}')
# Ping the j containers in the same network and ensure they are successful
runc $(dnet_container_name 1 $dnet_suffix) $(get_sbox_id 1 mh1_${i}) \
"ping -c 1 mh1_$j"
runc $(dnet_container_name 1 $dnet_suffix) $(get_sbox_id 1 mh2_${i}) \
"ping -c 1 mh2_$j"
# Try pinging j container IPs from the container in the other network and make sure that they are not successful
runc_nofail $(dnet_container_name 1 $dnet_suffix) $(get_sbox_id 1 mh1_${i}) "ping -c 1 ${mh2_j_ip}"
[ "${status}" -ne 0 ]
runc_nofail $(dnet_container_name 1 $dnet_suffix) $(get_sbox_id 1 mh2_${i}) "ping -c 1 ${mh1_j_ip}"
[ "${status}" -ne 0 ]
# Try pinging the j container IPs from the host (dnet container in this case) and make sure that they are not successful
hrun docker exec -it $(dnet_container_name 1 $dnet_suffix) "ping -c 1 ${mh1_j_ip}"
[ "${status}" -ne 0 ]
hrun docker exec -it $(dnet_container_name 1 $dnet_suffix) "ping -c 1 ${mh2_j_ip}"
[ "${status}" -ne 0 ]
done
done
# Teardown the container connections and the network
for i in `seq ${start} ${end}`;
do
net_disconnect 1 mh1_${i} multihost1
dnet_cmd $(inst_id2port 1) container rm mh1_${i}
done
for i in `seq ${start} ${end}`;
do
net_disconnect 1 mh2_${i} multihost2
dnet_cmd $(inst_id2port 1) container rm mh2_${i}
done
dnet_cmd $(inst_id2port 1) network rm multihost1
dnet_cmd $(inst_id2port 1) network rm multihost2
}

View File

@ -1,130 +0,0 @@
# -*- mode: sh -*-
#!/usr/bin/env bats
load helpers
function is_network_exist() {
    # Echo "true" when dnet instance $1 lists a network named $2 backed by
    # driver $3, "false" otherwise. Name and driver are the 2nd and 3rd
    # columns of the matching `network ls` row.
    local row nw_name nw_driver
    row=$(dnet_cmd $(inst_id2port $1) network ls | grep ${2})
    nw_name=$(echo ${row} | cut -d" " -f2)
    nw_driver=$(echo ${row} | cut -d" " -f3)
    if [ "$nw_name" == "$2" -a "$nw_driver" == "$3" ]; then
        echo "true"
    else
        echo "false"
    fi
}
@test "Test multinode network create" {
echo $(docker ps)
for i in `seq 1 3`;
do
oname="mh$i"
run dnet_cmd $(inst_id2port $i) network create -d test ${oname}
echo ${output}
[ "$status" -eq 0 ]
for j in `seq 1 3`;
do
result=$(is_network_exist $j ${oname} test)
[ "$result" = "true" ]
done
# Always try to remove the network from the second node
dnet_cmd $(inst_id2port 2) network rm ${oname}
echo "delete ${oname}"
nresult=$(is_network_exist 1 ${oname} test)
echo ${nresult}
dnet_cmd $(inst_id2port 1) network ls
[ "$nresult" = "false" ]
done
}
@test "Test multinode service create" {
echo $(docker ps)
dnet_cmd $(inst_id2port 1) network create -d test multihost
for i in `seq 1 3`;
do
oname="svc$i"
run dnet_cmd $(inst_id2port $i) service publish ${oname}.multihost
echo ${output}
[ "$status" -eq 0 ]
for j in `seq 1 3`;
do
run dnet_cmd $(inst_id2port $j) service ls
[ "$status" -eq 0 ]
echo ${output}
echo ${lines[1]}
svc=$(echo ${lines[1]} | cut -d" " -f2)
network=$(echo ${lines[1]} | cut -d" " -f3)
echo ${svc} ${network}
[ "$network" = "multihost" ]
[ "$svc" = "${oname}" ]
done
dnet_cmd $(inst_id2port 2) service unpublish ${oname}.multihost
done
dnet_cmd $(inst_id2port 3) network rm multihost
}
@test "Test multinode service attach" {
echo $(docker ps)
dnet_cmd $(inst_id2port 2) network create -d test multihost
dnet_cmd $(inst_id2port 3) service publish svc.multihost
for i in `seq 1 3`;
do
dnet_cmd $(inst_id2port $i) container create container_${i}
dnet_cmd $(inst_id2port $i) service attach container_${i} svc.multihost
run dnet_cmd $(inst_id2port $i) service ls
[ "$status" -eq 0 ]
echo ${output}
echo ${lines[1]}
container=$(echo ${lines[1]} | cut -d" " -f4)
[ "$container" = "container_$i" ]
for j in `seq 1 3`;
do
if [ "$j" = "$i" ]; then
continue
fi
dnet_cmd $(inst_id2port $j) container create container_${j}
run dnet_cmd $(inst_id2port $j) service attach container_${j} svc.multihost
echo ${output}
[ "$status" -ne 0 ]
dnet_cmd $(inst_id2port $j) container rm container_${j}
done
dnet_cmd $(inst_id2port $i) service detach container_${i} svc.multihost
dnet_cmd $(inst_id2port $i) container rm container_${i}
done
dnet_cmd $(inst_id2port 1) service unpublish svc.multihost
dnet_cmd $(inst_id2port 3) network rm multihost
}
@test "Test multinode network and service delete" {
echo $(docker ps)
for i in `seq 1 3`;
do
oname="mh$i"
osvc="svc$i"
dnet_cmd $(inst_id2port $i) network create -d test ${oname}
dnet_cmd $(inst_id2port $i) service publish ${osvc}.${oname}
dnet_cmd $(inst_id2port $i) container create container_${i}
dnet_cmd $(inst_id2port $i) network ls
dnet_cmd $(inst_id2port $i) service attach container_${i} ${osvc}.${oname}
for j in `seq 1 3`;
do
run dnet_cmd $(inst_id2port $i) service unpublish ${osvc}.${oname}
echo ${output}
[ "$status" -ne 0 ]
run dnet_cmd $(inst_id2port $j) network rm ${oname}
echo ${output}
[ "$status" -ne 0 ]
done
dnet_cmd $(inst_id2port $i) service detach container_${i} ${osvc}.${oname}
dnet_cmd $(inst_id2port $i) container rm container_${i}
# Always try to remove the service from different nodes
dnet_cmd $(inst_id2port 2) service unpublish ${osvc}.${oname}
dnet_cmd $(inst_id2port 3) network rm ${oname}
done
}

View File

@ -1,57 +0,0 @@
# -*- mode: sh -*-
#!/usr/bin/env bats
load helpers
function test_overlay_local() {
dnet_suffix=$1
echo $(docker ps)
start=1
end=3
for i in `seq ${start} ${end}`;
do
echo "iteration count ${i}"
dnet_cmd $(inst_id2port $i) network create -d overlay --id=mhid --subnet=10.1.0.0/16 --ip-range=10.1.${i}.0/24 --opt=com.docker.network.driver.overlay.vxlanid_list=1024 multihost
dnet_cmd $(inst_id2port $i) container create container_${i}
net_connect ${i} container_${i} multihost
done
# Now test connectivity between all the containers using service names
for i in `seq ${start} ${end}`;
do
if [ -z "${2}" -o "${2}" != "internal" ]; then
runc $(dnet_container_name $i $dnet_suffix) $(get_sbox_id ${i} container_${i}) \
"ping -c 1 www.google.com"
else
default_route=`runc $(dnet_container_name $i $dnet_suffix) $(get_sbox_id ${i} container_${i}) "ip route | grep default"`
[ "$default_route" = "" ]
fi
for j in `seq ${start} ${end}`;
do
if [ "$i" -eq "$j" ]; then
continue
fi
#runc $(dnet_container_name $i $dnet_suffix) $(get_sbox_id ${i} container_${i}) "ping -c 1 10.1.${j}.1"
runc $(dnet_container_name $i $dnet_suffix) $(get_sbox_id ${i} container_${i}) "ping -c 1 container_${j}"
done
done
# Teardown the container connections and the network
for i in `seq ${start} ${end}`;
do
net_disconnect ${i} container_${i} multihost
dnet_cmd $(inst_id2port $i) container rm container_${i}
done
if [ -z "${2}" -o "${2}" != "skip_rm" ]; then
dnet_cmd $(inst_id2port 2) network rm multihost
fi
}
@test "Test overlay network in local scope" {
test_overlay_local local
}
#"ping -c 1 10.1.${j}.1"

View File

@ -1,91 +0,0 @@
#!/usr/bin/env bash
set -e
export INTEGRATION_ROOT=./integration-tmp
export TMPC_ROOT=./integration-tmp/tmpc
declare -A cmap
trap "cleanup_containers" EXIT SIGINT
function cleanup_containers() {
for c in "${!cmap[@]}"; do
docker rm -f $c 1>> ${INTEGRATION_ROOT}/test.log 2>&1 || true
done
unset cmap
}
function run_bridge_tests() {
## Setup
start_dnet 1 bridge 1>> ${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet - 1 - bridge]=dnet-1-bridge
## Run the test cases
./integration-tmp/bin/bats ./test/integration/dnet/bridge.bats
## Teardown
stop_dnet 1 bridge 1>> ${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-1-bridge]
}
function run_overlay_local_tests() {
## Test overlay network in local scope
## Setup
start_dnet 1 local 1>> ${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet - 1 - local]=dnet-1-local
start_dnet 2 local:$(dnet_container_ip 1 local) 1>> ${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet - 2 - local]=dnet-2-local
start_dnet 3 local:$(dnet_container_ip 1 local) 1>> ${INTEGRATION_ROOT}/test.log 2>&1
cmap[dnet - 3 - local]=dnet-3-local
## Run the test cases
./integration-tmp/bin/bats ./test/integration/dnet/overlay-local.bats
## Teardown
stop_dnet 1 local 1>> ${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-1-local]
stop_dnet 2 local 1>> ${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-2-local]
stop_dnet 3 local 1>> ${INTEGRATION_ROOT}/test.log 2>&1
unset cmap[dnet-3-local]
}
function run_dnet_tests() {
# Test dnet configuration options
./integration-tmp/bin/bats ./test/integration/dnet/dnet.bats
}
source ./test/integration/dnet/helpers.bash
if [ ! -d ${INTEGRATION_ROOT} ]; then
mkdir -p ${INTEGRATION_ROOT}
git clone https://github.com/sstephenson/bats.git ${INTEGRATION_ROOT}/bats
./integration-tmp/bats/install.sh ./integration-tmp
fi
if [ ! -d ${TMPC_ROOT} ]; then
mkdir -p ${TMPC_ROOT}
docker pull busybox:ubuntu
docker export $(docker create busybox:ubuntu) > ${TMPC_ROOT}/busybox.tar
mkdir -p ${TMPC_ROOT}/rootfs
tar -C ${TMPC_ROOT}/rootfs -xf ${TMPC_ROOT}/busybox.tar
fi
# Suite setup
if [ -z "$SUITES" ]; then
suites="dnet bridge"
else
suites="$SUITES"
fi
echo ""
for suite in ${suites}; do
suite_func=run_${suite}_tests
echo "Running ${suite}_tests ..."
declare -F $suite_func > /dev/null && $suite_func
echo ""
done

View File

@ -1,80 +0,0 @@
#!/usr/bin/env bats
load helpers
@test "Test network create" {
echo $(docker ps)
run dnet_cmd $(inst_id2port 1) network create -d test mh1
echo ${output}
[ "$status" -eq 0 ]
run dnet_cmd $(inst_id2port 1) network ls
echo ${output}
line=$(dnet_cmd $(inst_id2port 1) network ls | grep mh1)
echo ${line}
name=$(echo ${line} | cut -d" " -f2)
driver=$(echo ${line} | cut -d" " -f3)
echo ${name} ${driver}
[ "$name" = "mh1" ]
[ "$driver" = "test" ]
dnet_cmd $(inst_id2port 1) network rm mh1
}
@test "Test network delete with id" {
echo $(docker ps)
run dnet_cmd $(inst_id2port 1) network create -d test mh1
[ "$status" -eq 0 ]
echo ${output}
dnet_cmd $(inst_id2port 1) network rm ${output}
}
# Publish a service on a network and verify `service ls` reports it with the
# expected service and network names, then unpublish and clean up.
@test "Test service create" {
echo $(docker ps)
dnet_cmd $(inst_id2port 1) network create -d test multihost
run dnet_cmd $(inst_id2port 1) service publish svc1.multihost
echo ${output}
[ "$status" -eq 0 ]
run dnet_cmd $(inst_id2port 1) service ls
echo ${output}
echo ${lines[1]}
[ "$status" -eq 0 ]
# ls columns: ID SERVICE NETWORK — lines[1] is the first data row after
# the header.
svc=$(echo ${lines[1]} | cut -d" " -f2)
network=$(echo ${lines[1]} | cut -d" " -f3)
echo ${svc} ${network}
[ "$network" = "multihost" ]
[ "$svc" = "svc1" ]
# Cleanup.
dnet_cmd $(inst_id2port 1) service unpublish svc1.multihost
dnet_cmd $(inst_id2port 1) network rm multihost
}
# Verify a published service can be unpublished by its ID (first column of
# the `service ls` data row) rather than by name.
@test "Test service delete with id" {
echo $(docker ps)
dnet_cmd $(inst_id2port 1) network create -d test multihost
run dnet_cmd $(inst_id2port 1) service publish svc1.multihost
[ "$status" -eq 0 ]
echo ${output}
run dnet_cmd $(inst_id2port 1) service ls
[ "$status" -eq 0 ]
echo ${output}
echo ${lines[1]}
# lines[1] is the first data row; field 1 is the service ID.
id=$(echo ${lines[1]} | cut -d" " -f1)
dnet_cmd $(inst_id2port 1) service unpublish ${id}.multihost
dnet_cmd $(inst_id2port 1) network rm multihost
}
# Attach a container to a published service and verify `service ls` shows the
# container in the backend column; then detach and tear everything down.
@test "Test service attach" {
echo $(docker ps)
dnet_cmd $(inst_id2port 1) network create -d test multihost
dnet_cmd $(inst_id2port 1) service publish svc1.multihost
dnet_cmd $(inst_id2port 1) container create container_1
dnet_cmd $(inst_id2port 1) service attach container_1 svc1.multihost
run dnet_cmd $(inst_id2port 1) service ls
[ "$status" -eq 0 ]
echo ${output}
echo ${lines[1]}
# Field 4 of the data row is the attached container's name.
container=$(echo ${lines[1]} | cut -d" " -f4)
[ "$container" = "container_1" ]
# Cleanup in reverse order of creation.
dnet_cmd $(inst_id2port 1) service detach container_1 svc1.multihost
dnet_cmd $(inst_id2port 1) container rm container_1
dnet_cmd $(inst_id2port 1) service unpublish svc1.multihost
dnet_cmd $(inst_id2port 1) network rm multihost
}

View File

@ -1,49 +0,0 @@
#!/bin/bash
# Root directory of the repository.
MACHINE_ROOT=/usr/bin
# Normalize platform/arch to docker-machine's release binary naming scheme
# (e.g. docker-machine_linux-amd64); anything non-x86_64 falls back to 386.
PLATFORM=$(uname -s | tr '[:upper:]' '[:lower:]')
ARCH=$(uname -m)
if [ "$ARCH" = "x86_64" ]; then
ARCH="amd64"
else
ARCH="386"
fi
MACHINE_BIN_NAME=docker-machine_$PLATFORM-$ARCH
BATS_LOG=/tmp/bats.log
# Start each run with no leftover log; touch-then-rm guarantees removal
# succeeds whether or not the file existed, and teardown re-creates it.
touch ${BATS_LOG}
rm ${BATS_LOG}
# teardown is invoked by bats after every test; it appends the test name and
# its captured output, framed by dashed rules, to the shared log file.
teardown() {
	printf '%s\n----------\n%s\n----------\n\n' \
		"$BATS_TEST_NAME" "$output" >> "${BATS_LOG}"
}
# Key under which extra daemon flags are written into the config file.
EXTRA_ARGS_CFG='EXTRA_ARGS'
# Default boot2docker daemon flags (TLS material paths and listen address).
EXTRA_ARGS='--tlsverify --tlscacert=/var/lib/boot2docker/ca.pem --tlskey=/var/lib/boot2docker/server-key.pem --tlscert=/var/lib/boot2docker/server.pem --label=provider=virtualbox -H tcp://0.0.0.0:2376'
# Scratch file that set_extra_config renders before it is installed.
TMP_EXTRA_ARGS_FILE=/tmp/tmp_extra_args
DAEMON_CFG_FILE=${BATS_TEST_DIRNAME}/daemon.cfg
# set_extra_config renders the EXTRA_ARGS_CFG line — the caller's extra
# flags ($1) prepended to the default EXTRA_ARGS, single-quoted — into the
# temp file that the daemon configuration tests consume.
set_extra_config() {
	[ -f "${TMP_EXTRA_ARGS_FILE}" ] && rm "${TMP_EXTRA_ARGS_FILE}"
	# Produces: EXTRA_ARGS='<caller flags> <default flags>'
	printf "%s='%s %s'\n" "${EXTRA_ARGS_CFG}" "$1" "${EXTRA_ARGS}" > "${TMP_EXTRA_ARGS_FILE}"
}
# Fail fast when the docker-machine binary under test is not installed.
if [ ! -e $MACHINE_ROOT/$MACHINE_BIN_NAME ]; then
echo "${MACHINE_ROOT}/${MACHINE_BIN_NAME} not found"
exit 1
fi
# machine runs the docker-machine binary under test, forwarding all
# arguments and propagating its exit status.
function machine() {
	# Quote the command path: unquoted expansion is subject to
	# word-splitting and globbing (ShellCheck SC2086).
	"${MACHINE_ROOT}/${MACHINE_BIN_NAME}" "$@"
}