Fixing Duplicate IP issue in IPAM library

This commit fixes duplicate IP allocation by addressing three issues:
1) A race condition when no datastore is present (for example, under
   swarmkit): the handle lock was released around the store read even when
   there was no store to serialize concurrent allocations.
2) The byte offset calculation depended on where the start bit fell in the
   bit sequence: when the start bit was in the middle of one of the
   instances of a block node, extra bytes were added to the offset (the
   layout sketch after this list shows the byte/bit arithmetic involved).
3) Finding the available bit could return the last bit of the current
   instance of a block node when the block was not full and the current bit
   was past the last available bit.
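
For reference, the bitmask these fixes operate on is a run-length-encoded list of 32-bit blocks. Below is a minimal sketch of that layout; the names mirror bitseq, but the bodies are my assumption of the straightforward byte/bit split, not the library code:

package main

import "fmt"

const (
	blockLen   = 32           // bits per block instance
	blockBytes = blockLen / 8 // 4 bytes per block instance
)

// Assumed helpers: an ordinal splits into a byte index and a bit index.
func ordinalToPos(ordinal uint64) (uint64, uint64) { return ordinal / 8, ordinal % 8 }

func posToOrdinal(bytePos, bitPos uint64) uint64 { return bytePos*8 + bitPos }

func main() {
	// Ordinal 42 lives in the 2nd 32-bit block, at byte 5, bit 2. A sequence
	// node with count c compresses c identical blocks and therefore covers
	// c*blockBytes bytes; fixes 2 and 3 are about walking those per-instance
	// boundaries correctly.
	bytePos, bitPos := ordinalToPos(42)
	fmt.Println(bytePos, bitPos, posToOrdinal(bytePos, bitPos)) // 5 2 42
}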

Signed-off-by: Abhinandan Prativadi <abhi@docker.com>
Author: Abhinandan Prativadi <abhi@docker.com>
Date:   2018-03-08 10:34:40 -08:00
Parent: 794025df9f
Commit: 6a8a15dd9d
1 changed file with 49 additions and 24 deletions

@@ -108,6 +108,12 @@ func (s *sequence) getAvailableBit(from uint64) (uint64, uint64, error) {
 		bitSel >>= 1
 		bits++
 	}
+	// Check if the loop exited because it could not
+	// find any available bit in the block starting from
+	// "from". Return an invalid pos in that case.
+	if bitSel == 0 {
+		return invalidPos, invalidPos, ErrNoBitAvailable
+	}
 	return bits / 8, bits % 8, nil
 }
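
For issue 3, here is a standalone sketch of the in-block scan (a simplified stand-in for sequence.getAvailableBit, with a plain error value instead of ErrNoBitAvailable/invalidPos): the loop walks from bit "from" toward the least significant bit, and the added check reports that nothing at or after "from" is free instead of falling through with an out-of-range position:

package main

import (
	"errors"
	"fmt"
)

// availableBit scans one 32-bit block, MSB-first, for the first zero bit at
// or after position "from".
func availableBit(block uint32, from uint64) (uint64, uint64, error) {
	bits := from
	bitSel := uint32(1<<31) >> from // select the bit at position "from"
	for bitSel > 0 && block&bitSel != 0 {
		bitSel >>= 1
		bits++
	}
	if bitSel == 0 {
		// Every bit at or after "from" is taken. Without this check the
		// caller would be handed byte 4, bit 0, which is past the block.
		return 0, 0, errors.New("no bit available")
	}
	return bits / 8, bits % 8, nil
}

func main() {
	// Block 0x0000FFFF has its 16 high-order bits free, but none at or after
	// bit 16, so the scan must fail rather than hand back a bit before "from".
	fmt.Println(availableBit(0x0000FFFF, 16))
}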
@@ -313,14 +319,14 @@ func (h *Handle) set(ordinal, start, end uint64, any bool, release bool, serial
 		curr := uint64(0)
 		h.Lock()
 		store = h.store
-		h.Unlock()
 		if store != nil {
+			h.Unlock() // The lock is acquired in the GetObject
 			if err := store.GetObject(datastore.Key(h.Key()...), h); err != nil && err != datastore.ErrKeyNotFound {
 				return ret, err
 			}
+			h.Lock() // Acquire the lock back
 		}
-		h.Lock()
 		logrus.Debugf("Received set for ordinal %v, start %v, end %v, any %t, release %t, serial:%v curr:%d \n", ordinal, start, end, any, release, serial, h.curr)
 		if serial {
 			curr = h.curr
 		}
@@ -346,7 +352,6 @@ func (h *Handle) set(ordinal, start, end uint64, any bool, release bool, serial
 		// Create a private copy of h and work on it
 		nh := h.getCopy()
-		h.Unlock()
 		nh.head = pushReservation(bytePos, bitPos, nh.head, release)
 		if release {
@@ -355,22 +360,25 @@ func (h *Handle) set(ordinal, start, end uint64, any bool, release bool, serial
 			nh.unselected--
 		}
-		// Attempt to write private copy to store
-		if err := nh.writeToStore(); err != nil {
-			if _, ok := err.(types.RetryError); !ok {
-				return ret, fmt.Errorf("internal failure while setting the bit: %v", err)
+		if h.store != nil {
+			h.Unlock()
+			// Attempt to write private copy to store
+			if err := nh.writeToStore(); err != nil {
+				if _, ok := err.(types.RetryError); !ok {
+					return ret, fmt.Errorf("internal failure while setting the bit: %v", err)
+				}
+				// Retry
+				continue
 			}
-			// Retry
-			continue
+			h.Lock()
 		}
 		// Previous atomic push was successful. Save private copy to local copy
-		h.Lock()
-		defer h.Unlock()
 		h.unselected = nh.unselected
 		h.head = nh.head
 		h.dbExists = nh.dbExists
 		h.dbIndex = nh.dbIndex
+		h.Unlock()
 		return ret, nil
 	}
 }
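
For issue 1, here is a cut-down sketch of the locking discipline the change above moves to (simplified types; Store, Load and SaveAtomic are invented names, not the bitseq/datastore API): the handle mutex is released only around datastore round-trips, and is held for the whole pick-and-commit when no store is configured, which is the swarmkit case where nothing else serializes concurrent allocations:

package main

import (
	"fmt"
	"sync"
)

// Store stands in for libnetwork's datastore; the method names are invented.
type Store interface {
	Load(h *Handle) error       // refresh h from the store
	SaveAtomic(h *Handle) error // fails if someone else wrote first
}

// Handle is a stand-in for bitseq.Handle, with the bitmask reduced to a set.
type Handle struct {
	mu    sync.Mutex
	store Store
	bits  map[uint64]bool
}

// Allocate marks an ordinal as in use, following the fixed pattern in set().
func (h *Handle) Allocate(ordinal uint64) error {
	for {
		h.mu.Lock()
		if h.store != nil {
			h.mu.Unlock() // the store round-trip does its own locking
			if err := h.store.Load(h); err != nil {
				return err
			}
			h.mu.Lock() // reacquire before reading or writing h
		}
		if h.bits[ordinal] {
			h.mu.Unlock()
			return fmt.Errorf("ordinal %d already allocated", ordinal)
		}
		// Work on a private copy, like getCopy()/pushReservation do.
		nh := make(map[uint64]bool, len(h.bits)+1)
		for k, v := range h.bits {
			nh[k] = v
		}
		nh[ordinal] = true
		if h.store != nil {
			h.mu.Unlock()
			if err := h.store.SaveAtomic(&Handle{bits: nh}); err != nil {
				continue // lost the race on the store: reload and retry
			}
			h.mu.Lock()
		}
		h.bits = nh // commit the private copy while still holding the lock
		h.mu.Unlock()
		return nil
	}
}

func main() {
	h := &Handle{bits: map[uint64]bool{}}       // no store configured: swarmkit-like case
	fmt.Println(h.Allocate(42), h.Allocate(42)) // <nil>, then "already allocated"
}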
@@ -498,24 +506,40 @@ func (h *Handle) UnmarshalJSON(data []byte) error {
 func getFirstAvailable(head *sequence, start uint64) (uint64, uint64, error) {
 	// Find sequence which contains the start bit
 	byteStart, bitStart := ordinalToPos(start)
-	current, _, _, inBlockBytePos := findSequence(head, byteStart)
+	current, _, precBlocks, inBlockBytePos := findSequence(head, byteStart)
 	// Derive this sequence's offsets
 	byteOffset := byteStart - inBlockBytePos
 	bitOffset := inBlockBytePos*8 + bitStart
-	var firstOffset uint64
-	if current == head {
-		firstOffset = byteOffset
-	}
 	for current != nil {
 		if current.block != blockMAX {
+			// If the current block is not full, check if there is any bit
+			// from the current bit in the current block. If not, before proceeding to the
+			// next block node, make sure we check for an available bit in the next
+			// instance of the same block. Due to RLE, the same block signature will be
+			// compressed.
+		retry:
 			bytePos, bitPos, err := current.getAvailableBit(bitOffset)
+			if err != nil && precBlocks == current.count-1 {
+				// This is the last instance in the same block node,
+				// so move to the next block.
+				goto next
+			}
+			if err != nil {
+				// There are some more instances of the same block, so add the offset
+				// and be optimistic that you will find the available bit in the next
+				// instance of the same block.
+				bitOffset = 0
+				byteOffset += blockBytes
+				precBlocks++
+				goto retry
+			}
 			return byteOffset + bytePos, bitPos, err
 		}
 		// Moving to next block: Reset bit offset.
+	next:
 		bitOffset = 0
-		byteOffset += (current.count * blockBytes) - firstOffset
-		firstOffset = 0
+		byteOffset += (current.count * blockBytes) - (precBlocks * blockBytes)
+		precBlocks = 0
 		current = current.next
 	}
 	return invalidPos, invalidPos, ErrNoBitAvailable
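
To make the offset bookkeeping in issue 2 concrete, here is a small worked example (blockBytes mirrors bitseq; the node position and counts are made up): a node that compresses three identical full blocks spans 3*blockBytes bytes, and when the start ordinal lands in its second instance, moving on to the next node must add only the bytes left in this node, which is what the (current.count * blockBytes) - (precBlocks * blockBytes) term above does.

package main

import "fmt"

const blockBytes = 4 // one 32-bit block instance spans 4 bytes, as in bitseq

func main() {
	// A later (non-head) node that compresses 3 identical full blocks and
	// starts at absolute byte 40; the start ordinal falls in its 2nd
	// instance, so one instance of this node precedes it (precBlocks = 1).
	nodeStart := uint64(40)
	count, precBlocks := uint64(3), uint64(1)
	byteOffset := nodeStart + precBlocks*blockBytes // that 2nd instance starts at byte 44

	// Fixed arithmetic: advance past the remaining instances only.
	fmt.Println(byteOffset + (count*blockBytes - precBlocks*blockBytes)) // 52, start of the next node

	// Old arithmetic, where firstOffset was 0 for any non-head node: the
	// offset overshoots by precBlocks*blockBytes, shifting every position
	// computed afterwards.
	fmt.Println(byteOffset + count*blockBytes) // 56
}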
@@ -526,19 +550,20 @@ func getFirstAvailable(head *sequence, start uint64) (uint64, uint64, error) {
 // This can be further optimized to check from start till curr in case of a rollover
 func getAvailableFromCurrent(head *sequence, start, curr, end uint64) (uint64, uint64, error) {
 	var bytePos, bitPos uint64
+	var err error
 	if curr != 0 && curr > start {
-		bytePos, bitPos, _ = getFirstAvailable(head, curr)
+		bytePos, bitPos, err = getFirstAvailable(head, curr)
 		ret := posToOrdinal(bytePos, bitPos)
-		if end < ret {
+		if end < ret || err != nil {
 			goto begin
 		}
 		return bytePos, bitPos, nil
 	}
 begin:
-	bytePos, bitPos, _ = getFirstAvailable(head, start)
+	bytePos, bitPos, err = getFirstAvailable(head, start)
 	ret := posToOrdinal(bytePos, bitPos)
-	if end < ret {
+	if end < ret || err != nil {
 		return invalidPos, invalidPos, ErrNoBitAvailable
 	}
 	return bytePos, bitPos, nil
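
Finally, a short sketch of what the propagated error buys here (stubbed scan function and positions collapsed to plain ordinals, so this is not the real signature): when the scan that starts at the serial cursor curr finds nothing usable, the caller rolls over and rescans from start instead of acting on a meaningless position:

package main

import (
	"errors"
	"fmt"
)

// availableFromCurrent mirrors the fixed control flow; firstAvailable is a
// stand-in for getFirstAvailable.
func availableFromCurrent(firstAvailable func(from uint64) (uint64, error), start, curr, end uint64) (uint64, error) {
	if curr != 0 && curr > start {
		if ord, err := firstAvailable(curr); err == nil && ord <= end {
			return ord, nil // found something at or after the serial cursor
		}
		// Error or past end: roll over and rescan from start instead of
		// trusting whatever position came back alongside the error.
	}
	ord, err := firstAvailable(start)
	if err != nil || ord > end {
		return 0, errors.New("no bit available")
	}
	return ord, nil
}

func main() {
	// Everything at or after the cursor (90) is taken, but ordinal 7 is free,
	// so the rollover finds it instead of failing.
	scan := func(from uint64) (uint64, error) {
		if from <= 7 {
			return 7, nil
		}
		return 0, errors.New("no bit available")
	}
	fmt.Println(availableFromCurrent(scan, 0, 90, 99)) // 7 <nil>
}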