mirror of https://github.com/moby/moby.git (synced 2022-11-09 12:21:53 -05:00)
commit 0890a1a95d
4 changed files with 202 additions and 62 deletions
@@ -108,6 +108,12 @@ func (s *sequence) getAvailableBit(from uint64) (uint64, uint64, error) {
 		bitSel >>= 1
 		bits++
 	}
+	// Check if the loop exited because it could not
+	// find any available bit in the block starting from
+	// "from". Return invalid pos in that case.
+	if bitSel == 0 {
+		return invalidPos, invalidPos, ErrNoBitAvailable
+	}
 	return bits / 8, bits % 8, nil
 }
 
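The guard added above matters because the scan loop can exhaust the block: when every bit from "from" onward is set, bitSel shifts down to zero and bits reaches 32, which the old code happily divided into an out-of-range byte/bit pair. A minimal standalone sketch of the same scan, with illustrative names rather than the library's exact code:

package main

import "fmt"

// firstZeroBit mimics the shape of getAvailableBit: scan a 32-bit block
// MSB-first from bit `from`, where a 1 bit means "allocated". Assumes
// from < 32; a sketch, not the bitseq implementation.
func firstZeroBit(block uint32, from uint) (bytePos, bitPos uint, err error) {
	bits := from
	bitSel := uint32(1) << (31 - from)
	for bitSel > 0 && block&bitSel != 0 {
		bitSel >>= 1
		bits++
	}
	// The case the commit handles: every bit from `from` onward is taken.
	if bitSel == 0 {
		return 0, 0, fmt.Errorf("no bit available")
	}
	return bits / 8, bits % 8, nil
}

func main() {
	fmt.Println(firstZeroBit(0xfffcffff, 0)) // 1 6 <nil>  (first free ordinal is 14)
	fmt.Println(firstZeroBit(0xffffffff, 0)) // 0 0 no bit available
}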
@@ -313,14 +319,14 @@ func (h *Handle) set(ordinal, start, end uint64, any bool, release bool, serial
 		curr := uint64(0)
 		h.Lock()
 		store = h.store
-		h.Unlock()
 		if store != nil {
+			h.Unlock() // The lock is acquired in the GetObject
 			if err := store.GetObject(datastore.Key(h.Key()...), h); err != nil && err != datastore.ErrKeyNotFound {
 				return ret, err
 			}
+			h.Lock() // Acquire the lock back
 		}
-
-		h.Lock()
+		logrus.Debugf("Received set for ordinal %v, start %v, end %v, any %t, release %t, serial:%v curr:%d \n", ordinal, start, end, any, release, serial, h.curr)
 		if serial {
 			curr = h.curr
 		}
@@ -346,7 +352,6 @@ func (h *Handle) set(ordinal, start, end uint64, any bool, release bool, serial
 
 		// Create a private copy of h and work on it
 		nh := h.getCopy()
-		h.Unlock()
 
 		nh.head = pushReservation(bytePos, bitPos, nh.head, release)
 		if release {
@@ -355,22 +360,25 @@ func (h *Handle) set(ordinal, start, end uint64, any bool, release bool, serial
 			nh.unselected--
 		}
 
-		// Attempt to write private copy to store
-		if err := nh.writeToStore(); err != nil {
-			if _, ok := err.(types.RetryError); !ok {
-				return ret, fmt.Errorf("internal failure while setting the bit: %v", err)
+		if h.store != nil {
+			h.Unlock()
+			// Attempt to write private copy to store
+			if err := nh.writeToStore(); err != nil {
+				if _, ok := err.(types.RetryError); !ok {
+					return ret, fmt.Errorf("internal failure while setting the bit: %v", err)
+				}
+				// Retry
+				continue
 			}
-			// Retry
-			continue
+			h.Lock()
 		}
 
 		// Previous atomic push was successful. Save private copy to local copy
-		h.Lock()
-		defer h.Unlock()
 		h.unselected = nh.unselected
 		h.head = nh.head
 		h.dbExists = nh.dbExists
 		h.dbIndex = nh.dbIndex
+		h.Unlock()
 		return ret, nil
 	}
 }
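Restructured this way, set only unlocks around the blocking store write, and the whole body still sits in a retry loop: mutate a private copy, attempt the write, start over if the store reports a retryable conflict. A generic sketch of that optimistic pattern, with a hypothetical versioned store interface (not the libnetwork datastore API):

package optimistic

import "errors"

// Store is a hypothetical versioned KV: Write fails with ErrConflict when
// the version it was given is no longer current.
type Store interface {
	Load() (state []byte, version uint64, err error)
	Write(state []byte, version uint64) error
}

var ErrConflict = errors.New("version conflict")

// update mirrors the shape of Handle.set: copy, mutate, push, retry.
func update(s Store, mutate func([]byte) []byte) error {
	for {
		state, version, err := s.Load()
		if err != nil {
			return err
		}
		private := mutate(append([]byte(nil), state...)) // private copy
		switch err := s.Write(private, version); {
		case err == nil:
			return nil // atomic push succeeded
		case errors.Is(err, ErrConflict):
			continue // someone else won the race; reload and retry
		default:
			return err // permanent failure, do not retry
		}
	}
}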
@@ -498,24 +506,40 @@ func (h *Handle) UnmarshalJSON(data []byte) error {
 func getFirstAvailable(head *sequence, start uint64) (uint64, uint64, error) {
 	// Find sequence which contains the start bit
 	byteStart, bitStart := ordinalToPos(start)
-	current, _, _, inBlockBytePos := findSequence(head, byteStart)
-
+	current, _, precBlocks, inBlockBytePos := findSequence(head, byteStart)
 	// Derive this sequence's offsets
 	byteOffset := byteStart - inBlockBytePos
 	bitOffset := inBlockBytePos*8 + bitStart
-	var firstOffset uint64
-	if current == head {
-		firstOffset = byteOffset
-	}
 	for current != nil {
 		if current.block != blockMAX {
+			// If the current block is not full, check if there is any bit
+			// from the current bit in the current block. If not, before proceeding to the
+			// next block node, make sure we check for available bit in the next
+			// instance of the same block. Due to RLE same block signature will be
+			// compressed.
+		retry:
 			bytePos, bitPos, err := current.getAvailableBit(bitOffset)
+			if err != nil && precBlocks == current.count-1 {
+				// This is the last instance in the same block node,
+				// so move to the next block.
+				goto next
+			}
+			if err != nil {
+				// There are some more instances of the same block, so add the offset
+				// and be optimistic that you will find the available bit in the next
+				// instance of the same block.
+				bitOffset = 0
+				byteOffset += blockBytes
+				precBlocks++
+				goto retry
+			}
 			return byteOffset + bytePos, bitPos, err
 		}
 		// Moving to next block: Reset bit offset.
+	next:
 		bitOffset = 0
-		byteOffset += (current.count * blockBytes) - firstOffset
-		firstOffset = 0
+		byteOffset += (current.count * blockBytes) - (precBlocks * blockBytes)
+		precBlocks = 0
 		current = current.next
 	}
 	return invalidPos, invalidPos, ErrNoBitAvailable
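The sequence type is run-length encoded: a node {block, count} stands for count consecutive copies of the same 32-bit block. The old code advanced by a whole node at a time, so a scan starting in the middle of a run could compute a wrong byte offset; the new precBlocks counter tracks how many instances of the current node are already behind the scan position, and the advance becomes (count - precBlocks) * blockBytes. A toy illustration of that arithmetic (illustrative names, not the bitseq code):

package main

import "fmt"

// advanceToNextNode moves from somewhere inside a run-length-encoded node
// to the start of the next node. byteOffset is the byte at which the current
// instance begins, count the node's repetition count, precBlocks how many
// instances of it precede the scan position (4 bytes per 32-bit block).
func advanceToNextNode(byteOffset, count, precBlocks uint64) uint64 {
	const blockBytes = 4
	// The old code effectively added the full count*blockBytes (modulo a
	// firstOffset special case), overshooting when the scan began mid-run.
	return byteOffset + (count-precBlocks)*blockBytes
}

func main() {
	// Scan began in the 3rd instance (precBlocks=2) of a 15-block run that
	// starts at byte 0, i.e. at byteOffset 8. The run occupies bytes 0-59,
	// so the next node must begin at byte 60.
	fmt.Println(advanceToNextNode(8, 15, 2)) // 60
}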
@@ -526,19 +550,20 @@ func getFirstAvailable(head *sequence, start uint64) (uint64, uint64, error) {
 // This can be further optimized to check from start till curr in case of a rollover
 func getAvailableFromCurrent(head *sequence, start, curr, end uint64) (uint64, uint64, error) {
 	var bytePos, bitPos uint64
+	var err error
 	if curr != 0 && curr > start {
-		bytePos, bitPos, _ = getFirstAvailable(head, curr)
+		bytePos, bitPos, err = getFirstAvailable(head, curr)
 		ret := posToOrdinal(bytePos, bitPos)
-		if end < ret {
+		if end < ret || err != nil {
 			goto begin
 		}
 		return bytePos, bitPos, nil
 	}
 
 begin:
-	bytePos, bitPos, _ = getFirstAvailable(head, start)
+	bytePos, bitPos, err = getFirstAvailable(head, start)
 	ret := posToOrdinal(bytePos, bitPos)
-	if end < ret {
+	if end < ret || err != nil {
 		return invalidPos, invalidPos, ErrNoBitAvailable
 	}
 	return bytePos, bitPos, nil
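getAvailableFromCurrent implements serial allocation with rollover: first look from curr (just past the last handed-out ordinal), and only if that tail is exhausted or out of range, wrap around and look from start. The bug fixed here is that both lookups discarded the error from getFirstAvailable, so an exhausted tail produced position (0, 0) and could be returned as if it were free. The control flow, as a compact sketch with a hypothetical next helper:

package main

import "fmt"

// availableFromCurrent sketches the rollover logic. next(from) returns the
// first free ordinal at or after from, or ok == false when none is left.
func availableFromCurrent(start, curr, end uint64, next func(uint64) (uint64, bool)) (uint64, bool) {
	if curr != 0 && curr > start {
		// Serial case: resume after the last allocation.
		if ord, ok := next(curr); ok && ord <= end {
			return ord, true
		}
		// Tail exhausted or past end: fall through and roll over.
	}
	if ord, ok := next(start); ok && ord <= end {
		return ord, true
	}
	return 0, false // ErrNoBitAvailable in the real code
}

func main() {
	// Free ordinals: {3, 10}. With curr=11 the tail is empty, so the scan
	// rolls over and finds 3; without the error check the old code could
	// hand back ordinal 0 as if it were free.
	free := map[uint64]bool{3: true, 10: true}
	next := func(from uint64) (uint64, bool) {
		for o := from; o < 16; o++ {
			if free[o] {
				return o, true
			}
		}
		return 0, false
	}
	fmt.Println(availableFromCurrent(0, 11, 15, next)) // 3 true
}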
@@ -192,6 +192,11 @@ func TestGetFirstAvailable(t *testing.T) {
 		{&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xfffffffe, count: 1, next: &sequence{block: 0xffffffff, count: 6}}}, 7, 7, 0},
 
 		{&sequence{block: 0xffffffff, count: 2, next: &sequence{block: 0x0, count: 6}}, 8, 0, 0},
+		{&sequence{block: 0xfffcffff, count: 1, next: &sequence{block: 0x0, count: 6}}, 4, 0, 16},
+		{&sequence{block: 0xfffcffff, count: 1, next: &sequence{block: 0x0, count: 6}}, 1, 7, 15},
+		{&sequence{block: 0xfffcffff, count: 1, next: &sequence{block: 0x0, count: 6}}, 1, 6, 10},
+		{&sequence{block: 0xfffcfffe, count: 1, next: &sequence{block: 0x0, count: 6}}, 3, 7, 31},
+		{&sequence{block: 0xfffcffff, count: 1, next: &sequence{block: 0xffffffff, count: 6}}, invalidPos, invalidPos, 31},
 	}
 
 	for n, i := range input {
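Reading the new rows: the struct is (head sequence, expected bytePos, expected bitPos, start ordinal). Take the row with head 0xfffcffff followed by six zero blocks and start 16, expecting (4, 0): in 0xfffcffff the only clear bits are ordinals 14 and 15, which lie before start 16, so nothing is free in the rest of that block and the scan must land on the first bit of the following all-zero block, ordinal 32, i.e. byte 4, bit 0. The mask can be checked mechanically:

package main

import "fmt"

func main() {
	// Print the free (zero) ordinals of the test block 0xfffcffff,
	// counting MSB-first as bitseq does.
	const block = uint32(0xfffcffff)
	for ord := 0; ord < 32; ord++ {
		if block&(uint32(1)<<(31-ord)) == 0 {
			fmt.Println("free ordinal:", ord) // prints 14, then 15
		}
	}
}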
@@ -1238,7 +1243,7 @@ func TestIsCorrupted(t *testing.T) {
 	}
 }
 
-func TestSetRollover(t *testing.T) {
+func testSetRollover(t *testing.T, serial bool) {
 	ds, err := randomLocalStore()
 	if err != nil {
 		t.Fatal(err)
@@ -1253,7 +1258,7 @@ func TestSetRollover(t *testing.T) {
 
 	// Allocate first half of the bits
 	for i := 0; i < numBits/2; i++ {
-		_, err := hnd.SetAny(true)
+		_, err := hnd.SetAny(serial)
 		if err != nil {
 			t.Fatalf("Unexpected failure on allocation %d: %v\n%s", i, err, hnd)
 		}
@@ -1276,12 +1281,12 @@ func TestSetRollover(t *testing.T) {
 		}
 	}
 	if hnd.Unselected() != uint64(3*numBits/4) {
-		t.Fatalf("Expected full sequence. Instead found %d free bits.\nSeed: %d.\n%s", hnd.unselected, seed, hnd)
+		t.Fatalf("Unexpected free bits: found %d free bits.\nSeed: %d.\n%s", hnd.unselected, seed, hnd)
 	}
 
 	// request to allocate for remaining half of the bits
 	for i := 0; i < numBits/2; i++ {
-		_, err := hnd.SetAny(true)
+		_, err := hnd.SetAny(serial)
 		if err != nil {
 			t.Fatalf("Unexpected failure on allocation %d: %v\nSeed: %d\n%s", i, err, seed, hnd)
 		}
@@ -1294,7 +1299,7 @@ func TestSetRollover(t *testing.T) {
 	}
 
 	for i := 0; i < numBits/4; i++ {
-		_, err := hnd.SetAny(true)
+		_, err := hnd.SetAny(serial)
 		if err != nil {
 			t.Fatalf("Unexpected failure on allocation %d: %v\nSeed: %d\n%s", i, err, seed, hnd)
 		}
@@ -1302,7 +1307,7 @@ func TestSetRollover(t *testing.T) {
 	// Now requesting to allocate the unallocated random bits (quarter of the number of bits) should
 	// leave no more bits that can be allocated.
 	if hnd.Unselected() != 0 {
-		t.Fatalf("Unexpected number of unselected bits %d, Expected %d", hnd.Unselected(), numBits/4)
+		t.Fatalf("Unexpected number of unselected bits %d, Expected %d", hnd.Unselected(), 0)
 	}
 
 	err = hnd.Destroy()
@@ -1310,3 +1315,47 @@ func TestSetRollover(t *testing.T) {
 		t.Fatal(err)
 	}
 }
+
+func TestSetRollover(t *testing.T) {
+	testSetRollover(t, false)
+}
+
+func TestSetRolloverSerial(t *testing.T) {
+	testSetRollover(t, true)
+}
+
+func TestGetFirstAvailableFromCurrent(t *testing.T) {
+	input := []struct {
+		mask    *sequence
+		bytePos uint64
+		bitPos  uint64
+		start   uint64
+		curr    uint64
+		end     uint64
+	}{
+		{&sequence{block: 0xffffffff, count: 2048}, invalidPos, invalidPos, 0, 0, 65536},
+		{&sequence{block: 0x0, count: 8}, 0, 0, 0, 0, 256},
+		{&sequence{block: 0x80000000, count: 8}, 1, 0, 0, 8, 256},
+		{&sequence{block: 0xC0000000, count: 8}, 0, 2, 0, 2, 256},
+		{&sequence{block: 0xE0000000, count: 8}, 0, 3, 0, 0, 256},
+		{&sequence{block: 0xFFFB1FFF, count: 8}, 2, 0, 14, 0, 256},
+		{&sequence{block: 0xFFFFFFFE, count: 8}, 3, 7, 0, 0, 256},
+
+		{&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0x00000000, count: 1, next: &sequence{block: 0xffffffff, count: 14}}}, 4, 0, 0, 32, 512},
+		{&sequence{block: 0xfffeffff, count: 1, next: &sequence{block: 0xffffffff, count: 15}}, 1, 7, 0, 16, 512},
+		{&sequence{block: 0xfffeffff, count: 15, next: &sequence{block: 0xffffffff, count: 1}}, 5, 7, 0, 16, 512},
+		{&sequence{block: 0xfffeffff, count: 15, next: &sequence{block: 0xffffffff, count: 1}}, 9, 7, 0, 48, 512},
+		{&sequence{block: 0xffffffff, count: 2, next: &sequence{block: 0xffffffef, count: 14}}, 19, 3, 0, 124, 512},
+		{&sequence{block: 0xfffeffff, count: 15, next: &sequence{block: 0x0fffffff, count: 1}}, 60, 0, 0, 480, 512},
+		{&sequence{block: 0xffffffff, count: 1, next: &sequence{block: 0xfffeffff, count: 14, next: &sequence{block: 0xffffffff, count: 1}}}, 17, 7, 0, 124, 512},
+		{&sequence{block: 0xfffffffb, count: 1, next: &sequence{block: 0xffffffff, count: 14, next: &sequence{block: 0xffffffff, count: 1}}}, 3, 5, 0, 124, 512},
+		{&sequence{block: 0xfffffffb, count: 1, next: &sequence{block: 0xfffeffff, count: 14, next: &sequence{block: 0xffffffff, count: 1}}}, 13, 7, 0, 80, 512},
+	}
+
+	for n, i := range input {
+		bytePos, bitPos, _ := getAvailableFromCurrent(i.mask, i.start, i.curr, i.end)
+		if bytePos != i.bytePos || bitPos != i.bitPos {
+			t.Fatalf("Error in (%d) getFirstAvailable(). Expected (%d, %d). Got (%d, %d)", n, i.bytePos, i.bitPos, bytePos, bitPos)
+		}
+	}
+}
@@ -402,15 +402,15 @@ func (a *Allocator) getPredefinedPool(as string, ipV6 bool) (*net.IPNet, error)
 			continue
 		}
 		aSpace.Lock()
-		_, ok := aSpace.subnets[SubnetKey{AddressSpace: as, Subnet: nw.String()}]
-		aSpace.Unlock()
-		if ok {
+		if _, ok := aSpace.subnets[SubnetKey{AddressSpace: as, Subnet: nw.String()}]; ok {
+			aSpace.Unlock()
 			continue
 		}
-
 		if !aSpace.contains(as, nw) {
+			aSpace.Unlock()
 			return nw, nil
 		}
+		aSpace.Unlock()
 	}
 
 	return nil, types.NotFoundErrorf("could not find an available, non-overlapping IPv%d address pool among the defaults to assign to the network", v)
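The reshuffled locking removes a check-then-act race: the old code read aSpace.subnets under the lock, dropped the lock, and then acted on the stale answer, so two concurrent callers could both conclude a subnet was unused. The fixed shape decides and acts in one critical section. In miniature (hypothetical types, not the allocator's):

package space

import "sync"

type space struct {
	mu      sync.Mutex
	subnets map[string]bool
}

// hasRacy reads under the lock but lets the answer go stale before the
// caller acts on it; this is the pattern the commit removes.
func (s *space) hasRacy(key string) bool {
	s.mu.Lock()
	ok := s.subnets[key]
	s.mu.Unlock()
	return ok
}

// claim decides and acts while still holding the lock, the fixed shape.
func (s *space) claim(key string) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.subnets[key] {
		return false // already taken
	}
	s.subnets[key] = true
	return true
}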
@@ -8,6 +8,7 @@ import (
 	"math/rand"
 	"net"
 	"strconv"
+	"sync"
 	"testing"
 	"time"
 
@@ -30,7 +31,10 @@ func init() {
 }
 
 // OptionBoltdbWithRandomDBFile function returns a random dir for local store backend
-func randomLocalStore() (datastore.DataStore, error) {
+func randomLocalStore(needStore bool) (datastore.DataStore, error) {
+	if !needStore {
+		return nil, nil
+	}
 	tmp, err := ioutil.TempFile("", "libnetwork-")
 	if err != nil {
 		return nil, fmt.Errorf("Error creating temp file: %v", err)
@@ -50,17 +54,13 @@ func randomLocalStore() (datastore.DataStore, error) {
 	})
 }
 
-func getAllocator() (*Allocator, error) {
+func getAllocator(store bool) (*Allocator, error) {
 	ipamutils.InitNetworks(nil)
-	ds, err := randomLocalStore()
+	ds, err := randomLocalStore(store)
 	if err != nil {
 		return nil, err
 	}
-	a, err := NewAllocator(ds, nil)
-	if err != nil {
-		return nil, err
-	}
-	return a, nil
+	return NewAllocator(ds, nil)
 }
 
 func TestInt2IP2IntConversion(t *testing.T) {
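With the flag threaded through, each test declares whether it wants the BoltDB-backed path; randomLocalStore(false) short-circuits to (nil, nil) and the allocator runs purely in memory. A hypothetical call site under the new signatures (an example test, not part of the commit):

func TestAllocatorStoreToggle(t *testing.T) {
	inMemory, err := getAllocator(false) // no datastore at all
	if err != nil {
		t.Fatal(err)
	}
	persistent, err := getAllocator(true) // temporary BoltDB-backed store
	if err != nil {
		t.Fatal(err)
	}
	_, _ = inMemory, persistent
}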
@@ -170,7 +170,7 @@ func TestPoolDataMarshal(t *testing.T) {
 }
 
 func TestSubnetsMarshal(t *testing.T) {
-	a, err := getAllocator()
+	a, err := getAllocator(true)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -217,7 +217,7 @@ func TestSubnetsMarshal(t *testing.T) {
 }
 
 func TestAddSubnets(t *testing.T) {
-	a, err := getAllocator()
+	a, err := getAllocator(true)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -286,7 +286,7 @@ func TestAddSubnets(t *testing.T) {
 func TestAddReleasePoolID(t *testing.T) {
 	var k0, k1, k2 SubnetKey
 
-	a, err := getAllocator()
+	a, err := getAllocator(true)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -448,7 +448,7 @@ func TestAddReleasePoolID(t *testing.T) {
 }
 
 func TestPredefinedPool(t *testing.T) {
-	a, err := getAllocator()
+	a, err := getAllocator(true)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -476,7 +476,7 @@ func TestPredefinedPool(t *testing.T) {
 }
 
 func TestRemoveSubnet(t *testing.T) {
-	a, err := getAllocator()
+	a, err := getAllocator(true)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -519,7 +519,7 @@ func TestRemoveSubnet(t *testing.T) {
 }
 
 func TestGetSameAddress(t *testing.T) {
-	a, err := getAllocator()
+	a, err := getAllocator(true)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -549,7 +549,7 @@ func TestGetSameAddress(t *testing.T) {
 }
 
 func TestGetAddressSubPoolEqualPool(t *testing.T) {
-	a, err := getAllocator()
+	a, err := getAllocator(true)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -566,7 +566,7 @@ func TestGetAddressSubPoolEqualPool(t *testing.T) {
 }
 
 func TestRequestReleaseAddressFromSubPool(t *testing.T) {
-	a, err := getAllocator()
+	a, err := getAllocator(true)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -697,8 +697,9 @@ func TestRequestReleaseAddressFromSubPool(t *testing.T) {
 
 func TestSerializeRequestReleaseAddressFromSubPool(t *testing.T) {
 	opts := map[string]string{
-		ipamapi.AllocSerialPrefix: "true"}
-	a, err := getAllocator()
+		ipamapi.AllocSerialPrefix: "true",
+	}
+	a, err := getAllocator(false)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -848,7 +849,7 @@ func TestRequestSyntaxCheck(t *testing.T) {
 		err error
 	)
 
-	a, err := getAllocator()
+	a, err := getAllocator(true)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -950,7 +951,7 @@ func TestRelease(t *testing.T) {
 		subnet = "192.168.0.0/23"
 	)
 
-	a, err := getAllocator()
+	a, err := getAllocator(true)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -1057,7 +1058,7 @@ func assertNRequests(t *testing.T, subnet string, numReq int, lastExpectedIP str
 	)
 
 	lastIP := net.ParseIP(lastExpectedIP)
-	a, err := getAllocator()
+	a, err := getAllocator(true)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1088,24 +1089,24 @@ func benchmarkRequest(b *testing.B, a *Allocator, subnet string) {
 }
 
 func benchMarkRequest(subnet string, b *testing.B) {
-	a, _ := getAllocator()
+	a, _ := getAllocator(true)
 	for n := 0; n < b.N; n++ {
 		benchmarkRequest(b, a, subnet)
 	}
 }
 
 func BenchmarkRequest_24(b *testing.B) {
-	a, _ := getAllocator()
+	a, _ := getAllocator(true)
 	benchmarkRequest(b, a, "10.0.0.0/24")
 }
 
 func BenchmarkRequest_16(b *testing.B) {
-	a, _ := getAllocator()
+	a, _ := getAllocator(true)
 	benchmarkRequest(b, a, "10.0.0.0/16")
 }
 
 func BenchmarkRequest_8(b *testing.B) {
-	a, _ := getAllocator()
+	a, _ := getAllocator(true)
 	benchmarkRequest(b, a, "10.0.0.0/8")
 }
 

@@ -1115,7 +1116,7 @@ func TestAllocateRandomDeallocate(t *testing.T) {
 }
 
 func testAllocateRandomDeallocate(t *testing.T, pool, subPool string, num int) {
-	ds, err := randomLocalStore()
+	ds, err := randomLocalStore(true)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -1184,7 +1185,7 @@ func testAllocateRandomDeallocate(t *testing.T, pool, subPool string, num int) {
 
 func TestRetrieveFromStore(t *testing.T) {
 	num := 200
-	ds, err := randomLocalStore()
+	ds, err := randomLocalStore(true)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -1319,7 +1320,7 @@ func runParallelTests(t *testing.T, instance int) {
 	// The first instance creates the allocator, gives the start
 	// and finally checks the pools each instance was assigned
 	if instance == first {
-		allocator, err = getAllocator()
+		allocator, err = getAllocator(true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -1362,6 +1363,71 @@ func runParallelTests(t *testing.T, instance int) {
 	}
 }
 
+func TestRequestReleaseAddressDuplicate(t *testing.T) {
+	a, err := getAllocator(false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	type IP struct {
+		ip  *net.IPNet
+		ref int
+	}
+	ips := []IP{}
+	allocatedIPs := []*net.IPNet{}
+	a.addrSpaces["rosso"] = &addrSpace{
+		id:      dsConfigKey + "/" + "rosso",
+		ds:      a.addrSpaces[localAddressSpace].ds,
+		alloc:   a.addrSpaces[localAddressSpace].alloc,
+		scope:   a.addrSpaces[localAddressSpace].scope,
+		subnets: map[SubnetKey]*PoolData{},
+	}
+	var wg sync.WaitGroup
+	opts := map[string]string{
+		ipamapi.AllocSerialPrefix: "true",
+	}
+	var l sync.Mutex
+
+	poolID, _, _, err := a.RequestPool("rosso", "198.168.0.0/23", "", nil, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for err == nil {
+		var c *net.IPNet
+		if c, _, err = a.RequestAddress(poolID, nil, opts); err == nil {
+			l.Lock()
+			ips = append(ips, IP{c, 1})
+			l.Unlock()
+			allocatedIPs = append(allocatedIPs, c)
+			if len(allocatedIPs) > 500 {
+				i := rand.Intn(len(allocatedIPs) - 1)
+				wg.Add(1)
+				go func(ip *net.IPNet) {
+					if err = a.ReleaseAddress(poolID, ip.IP); err != nil {
+						t.Fatal(err)
+					}
+					l.Lock()
+					ips = append(ips, IP{ip, -1})
+					l.Unlock()
+					wg.Done()
+				}(allocatedIPs[i])
+				allocatedIPs = append(allocatedIPs[:i], allocatedIPs[i+1:]...)
+			}
+		}
+	}
+	wg.Wait()
+	refMap := make(map[string]int)
+	for _, ip := range ips {
+		refMap[ip.ip.String()] = refMap[ip.ip.String()] + ip.ref
+		if refMap[ip.ip.String()] < 0 {
+			t.Fatalf("IP %s was previously released", ip.ip.String())
+		}
+		if refMap[ip.ip.String()] > 1 {
+			t.Fatalf("IP %s was previously allocated", ip.ip.String())
+		}
+	}
+}
+
 func TestParallelPredefinedRequest1(t *testing.T) {
 	runParallelTests(t, 0)
 }
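The final sweep in the new test is a small event-sourcing check: every allocation appends +1 and every release appends -1 under the mutex, and replaying the log per address must never push a count above 1 (double allocation) or below 0 (release of a free address). The same check as a standalone helper (a sketch with invented names):

package main

import "fmt"

type event struct {
	addr string
	ref  int // +1 allocate, -1 release
}

// verify replays allocation/release events and flags duplicates.
func verify(events []event) error {
	counts := make(map[string]int)
	for _, e := range events {
		counts[e.addr] += e.ref
		if counts[e.addr] > 1 {
			return fmt.Errorf("%s allocated twice", e.addr)
		}
		if counts[e.addr] < 0 {
			return fmt.Errorf("%s released while free", e.addr)
		}
	}
	return nil
}

func main() {
	fmt.Println(verify([]event{{"10.0.0.2", 1}, {"10.0.0.2", -1}, {"10.0.0.2", 1}})) // <nil>
	fmt.Println(verify([]event{{"10.0.0.2", 1}, {"10.0.0.2", 1}}))                   // 10.0.0.2 allocated twice
}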