From a52bcf48f27828904e88418b3e46df293e16d7cb Mon Sep 17 00:00:00 2001 From: Abhinandan Prativadi Date: Wed, 31 May 2017 13:33:51 -0700 Subject: [PATCH] Serializing bitseq alloc Previously the bitseq alloc was allocating the first available bit from the beginning of the sequence. With this commit the bitseq alloc will proceed from the current allocation. This change will affect the way ipam and vni allocation is done currently. The IP allocation will be done sequentially from the previous allocation as opposed to the first available IP. Signed-off-by: Abhinandan Prativadi --- libnetwork/bitseq/sequence.go | 31 +++++++++++++++++++++++--- libnetwork/bitseq/store.go | 1 + libnetwork/libnetwork_internal_test.go | 2 +- 3 files changed, 30 insertions(+), 4 deletions(-) diff --git a/libnetwork/bitseq/sequence.go b/libnetwork/bitseq/sequence.go index 3946473d8b..ade40633f0 100644 --- a/libnetwork/bitseq/sequence.go +++ b/libnetwork/bitseq/sequence.go @@ -41,6 +41,7 @@ type Handle struct { id string dbIndex uint64 dbExists bool + curr uint64 store datastore.DataStore sync.Mutex } @@ -193,6 +194,7 @@ func (h *Handle) getCopy() *Handle { dbIndex: h.dbIndex, dbExists: h.dbExists, store: h.store, + curr: h.curr, } } @@ -323,10 +325,10 @@ func (h *Handle) set(ordinal, start, end uint64, any bool, release bool) (uint64 bytePos, bitPos = ordinalToPos(ordinal) } else { if any { - bytePos, bitPos, err = getFirstAvailable(h.head, start) + bytePos, bitPos, err = getAvailableFromCurrent(h.head, start, h.curr, end) ret = posToOrdinal(bytePos, bitPos) - if end < ret { - err = ErrNoBitAvailable + if err == nil { + h.curr = ret + 1 } } else { bytePos, bitPos, err = checkIfAvailable(h.head, ordinal) @@ -515,6 +517,29 @@ func getFirstAvailable(head *sequence, start uint64) (uint64, uint64, error) { return invalidPos, invalidPos, ErrNoBitAvailable } +// getAvailableFromCurrent will look for an available ordinal from the current ordinal. 
+// If none found then it will loop back to the start to check for the available bit. +// This can be further optimized to check from start till curr in case of a rollover +func getAvailableFromCurrent(head *sequence, start, curr, end uint64) (uint64, uint64, error) { + var bytePos, bitPos uint64 + if curr != 0 && curr > start { + bytePos, bitPos, _ = getFirstAvailable(head, curr) + ret := posToOrdinal(bytePos, bitPos) + if end < ret { + goto begin + } + return bytePos, bitPos, nil + } + +begin: + bytePos, bitPos, _ = getFirstAvailable(head, start) + ret := posToOrdinal(bytePos, bitPos) + if end < ret { + return invalidPos, invalidPos, ErrNoBitAvailable + } + return bytePos, bitPos, nil +} + // checkIfAvailable checks if the bit correspondent to the specified ordinal is unset // If the ordinal is beyond the sequence limits, a negative response is returned func checkIfAvailable(head *sequence, ordinal uint64) (uint64, uint64, error) { diff --git a/libnetwork/bitseq/store.go b/libnetwork/bitseq/store.go index 5448927eb1..cdb7f04264 100644 --- a/libnetwork/bitseq/store.go +++ b/libnetwork/bitseq/store.go @@ -87,6 +87,7 @@ func (h *Handle) CopyTo(o datastore.KVObject) error { dstH.dbIndex = h.dbIndex dstH.dbExists = h.dbExists dstH.store = h.store + dstH.curr = h.curr dstH.Unlock() return nil diff --git a/libnetwork/libnetwork_internal_test.go b/libnetwork/libnetwork_internal_test.go index 58742cf5e1..805f3aaa8f 100644 --- a/libnetwork/libnetwork_internal_test.go +++ b/libnetwork/libnetwork_internal_test.go @@ -614,7 +614,7 @@ func TestIpamReleaseOnNetDriverFailures(t *testing.T) { } defer ep.Delete(false) - expectedIP, _ := types.ParseCIDR("10.34.0.1/16") + expectedIP, _ := types.ParseCIDR("10.34.0.2/16") if !types.CompareIPNet(ep.Info().Iface().Address(), expectedIP) { t.Fatalf("Ipam release must have failed, endpoint has unexpected address: %v", ep.Info().Iface().Address()) }