/*******************************************************************************

	Copyright(C) Jonas 'Sortie' Termansen 2011, 2012.

	This file is part of LibMaxsi.

	LibMaxsi is free software: you can redistribute it and/or modify it under
	the terms of the GNU Lesser General Public License as published by the Free
	Software Foundation, either version 3 of the License, or (at your option)
	any later version.

	LibMaxsi is distributed in the hope that it will be useful, but WITHOUT ANY
	WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
	FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
	details.

	You should have received a copy of the GNU Lesser General Public License
	along with LibMaxsi. If not, see <http://www.gnu.org/licenses/>.

	heap.cpp
	Functions that allocate/free memory from a dynamic memory heap.

*******************************************************************************/

#include <sys/mman.h>

#include <libmaxsi/platform.h>
#include <libmaxsi/memory.h>
#include <libmaxsi/error.h>

#ifdef SORTIX_KERNEL
#define HEAP_GROWS_DOWNWARDS
#endif

#ifndef SORTIX_KERNEL
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#undef ASSERT
#define ASSERT(invariant) assert(invariant)
#endif

#define PARANOIA 1

#ifdef SORTIX_KERNEL
#include <sortix/kernel/platform.h>
#include <sortix/kernel/kthread.h>
#include <sortix/kernel/log.h> // DEBUG
#include <sortix/kernel/memorymanagement.h>
#include <sortix/kernel/panic.h>
#endif

namespace Maxsi
{
	namespace Memory
	{
		//
		// This first section is just magic compiler/platform stuff, you should
		// skip ahead to the actual algorithm.
		//

		#ifdef PLATFORM_X64
		const size_t MAGIC = 0xDEADDEADDEADDEADUL;
		const size_t ALIGNMENT = 16UL;
		#else
		const size_t MAGIC = 0xDEADDEADUL;
		const size_t ALIGNMENT = 8UL;
		#endif

		const size_t PAGESIZE = 4UL * 1024UL; // 4 KiB
		const size_t NUMBINS = 8UL * sizeof(size_t);

		extern addr_t wilderness;

		#ifdef SORTIX_KERNEL
		addr_t GetHeapStart()
		{
			return Sortix::Memory::GetHeapUpper();
		}

		size_t GetHeapMaxSize()
		{
			return Sortix::Memory::GetHeapUpper() - Sortix::Memory::GetHeapLower();
		}

		void FreeMemory(addr_t where, size_t bytes)
		{
			ASSERT(Sortix::Page::IsAligned(where + bytes));

			while ( bytes )
			{
				addr_t page = Sortix::Memory::Unmap(where);
				Sortix::Page::Put(page);

				bytes -= PAGESIZE;
				where += PAGESIZE;
			}
		}

		bool AllocateMemory(addr_t where, size_t bytes)
		{
			ASSERT(Sortix::Page::IsAligned(where + bytes));

			addr_t pos = where;

			while ( bytes )
			{
				addr_t page = Sortix::Page::Get();
				if ( !page )
				{
					FreeMemory(where, pos-where);
					return false;
				}

				if ( !Sortix::Memory::Map(page, pos, PROT_KREAD | PROT_KWRITE) )
				{
					Sortix::Page::Put(page);
					FreeMemory(where, pos-where);
					return false;
				}

				bytes -= PAGESIZE;
				pos += PAGESIZE;
			}

			return true;
		}

		bool ExtendHeap(size_t bytesneeded)
		{
			#ifdef HEAP_GROWS_DOWNWARDS
			addr_t newwilderness = wilderness - bytesneeded;
			#else
			addr_t newwilderness = wilderness + bytesneeded;
			#endif

			return AllocateMemory(newwilderness, bytesneeded);
		}
		#else
		addr_t GetHeapStart()
		{
			addr_t base = (addr_t) sbrk(0);
			addr_t unaligned = base % ALIGNMENT;
			if ( unaligned )
			{
				sbrk(ALIGNMENT-unaligned);
			}
			addr_t result = (addr_t) sbrk(0);
			return result;
		}

		size_t GetHeapMaxSize()
		{
			// TODO: A bit of a hack!
			return SIZE_MAX;
		}

		bool ExtendHeap(size_t bytesneeded)
		{
			void* newheapend = sbrk(bytesneeded);
			return newheapend != (void*) -1UL;
		}
		#endif

		// TODO: BitScanForward and BitScanReverse are x86 instructions, but
		// directly using them messes with the optimizer. Once possible, use
		// the inline assembly instead of the C-version of the functions.

		// Returns the index of the most significant set bit.
		inline size_t BSR(size_t Value)
		{
			#if 1
			ASSERT(Value > 0);
			for ( size_t I = 8*sizeof(size_t); I > 0; I-- )
			{
				if ( Value & ( 1UL << (I-1) ) ) { return I-1; }
			}
			return 0;
			#else
			size_t Result;
			asm("bsr %0, %1" : "=r"(Result) : "r"(Value));
			return Result;
			#endif
		}

		// Returns the index of the least significant set bit.
		inline size_t BSF(size_t Value)
		{
			#if 1
			ASSERT(Value > 0);
			for ( size_t I = 0; I < 8*sizeof(size_t); I++ )
			{
				if ( Value & ( 1UL << I ) ) { return I; }
			}
			return 0;
			#else
			size_t Result;
			asm("bsf %0, %1" : "=r"(Result) : "r"(Value));
			return Result;
			#endif
		}
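
		// Illustrative note: with these helpers, BSR(value) behaves like
		// floor(log2(value)) and BSF(value) counts the trailing zero bits.
		// For example, BSR(0x48) == 6 and BSF(0x48) == 3. The bin logic below
		// relies on exactly this floor/ceiling behavior.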

		//
		// Now for some helper functions and structures.
		//

		struct Chunk;
		struct Trailer;

		#ifdef SORTIX_KERNEL
		Sortix::kthread_mutex_t heaplock;
		#endif

		// The location where the heap originally grows from.
		addr_t heapstart;

		// If heap grows down: Location of the first mapped page.
		// If heap grows up: Location of the first not-mapped page.
		addr_t wilderness;

		// How many bytes remain in the wilderness.
		size_t wildernesssize;

		// How many bytes the heap is allowed to grow to (including wilderness).
		size_t heapmaxsize;

		// How many bytes are currently used for chunks in the heap, which
		// excludes the wilderness.
		size_t heapsize;
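
		// Informal invariant (maintained by the code below, not checked in one
		// place): heapsize + wildernesssize never exceeds heapmaxsize, and the
		// used heap plus the wilderness together cover exactly the memory that
		// ExtendHeap has mapped so far.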

		// bins[N] contains a linked list of unused chunks that are at least
		// 2^N bytes, but less than 2^(N+1) bytes. By selecting the proper bin
		// in constant time, we can allocate chunks in constant time.
		Chunk* bins[NUMBINS];

		// Bit N is set if bins[N] contains a chunk.
		size_t bincontainschunks;
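
		// Worked example (illustrative): a free chunk whose size field is 112
		// bytes has BSR(112) == 6, so it is linked into bins[6] and bit 6 of
		// bincontainschunks is set. A request needing 80 bytes searches
		// bincontainschunks with a mask that keeps bits 7 and up, because only
		// a bin whose chunks are all at least 2^7 bytes is guaranteed to fit.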

		static bool IsGoodHeapPointer(void* ptr, size_t size)
		{
			uintptr_t ptrlower = (uintptr_t) ptr;
			uintptr_t ptrupper = ptrlower + size;
			#ifdef HEAP_GROWS_DOWNWARDS
			uintptr_t heaplower = wilderness;
			uintptr_t heapupper = heapstart;
			#else
			uintptr_t heaplower = heapstart;
			uintptr_t heapupper = wilderness;
			#endif
			return heaplower <= ptrlower && ptrupper <= heapupper;
		}

		// A preamble to every chunk providing meta-information.
		struct Chunk
		{
		public:
			size_t size; // Includes size of Chunk and Trailer
			union
			{
				size_t magic;
				Chunk* nextunused;
			};

		public:
			bool IsUsed() { return magic == MAGIC; }
			Trailer* GetTrailer();
			Chunk* LeftNeighbor();
			Chunk* RightNeighbor();
			bool IsSane();

		};

		// A trailer to every chunk providing meta-information.
		struct Trailer
		{
		public:
			union
			{
				size_t magic;
				Chunk* prevunused;
			};
			size_t size; // Includes size of Chunk and Trailer

		public:
			bool IsUsed() { return magic == MAGIC; }
			Chunk* GetChunk();

		};

		const size_t OVERHEAD = sizeof(Chunk) + sizeof(Trailer);

		// This is how a real chunk actually looks:
		//struct RealChunk
		//{
		//	Chunk header;
		//	byte data[...];
		//	Trailer footer;
		// };
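
		// Design note: storing the size both in the Chunk header and in the
		// Trailer footer is a classic boundary tag. The trailer of the chunk
		// to the left sits immediately before a chunk's header, so
		// LeftNeighbor() and RightNeighbor() below are simple pointer
		// arithmetic and freeing can coalesce neighbors in constant time.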

		Trailer* Chunk::GetTrailer()
		{
			return (Trailer*) (((addr_t) this) + size - sizeof(Trailer));
		}

		Chunk* Chunk::LeftNeighbor()
		{
			Trailer* trailer = (Trailer*) (((addr_t) this) - sizeof(Trailer));
			return trailer->GetChunk();
		}

		Chunk* Chunk::RightNeighbor()
		{
			return (Chunk*) (((addr_t) this) + size);
		}

		Chunk* Trailer::GetChunk()
		{
			return (Chunk*) (((addr_t) this) + sizeof(Trailer) - size);
		}

		bool Chunk::IsSane()
		{
			if ( !IsGoodHeapPointer(this, sizeof(*this)) )
				return false;
			if ( !size ) { return false; }
			size_t binindex = BSR(size);
			Trailer* trailer = GetTrailer();
			if ( !IsGoodHeapPointer(trailer, sizeof(*trailer)) )
				return false;
			if ( trailer->size != size ) { return false; }
			if ( IsUsed() )
			{
				if ( bins[binindex] == this ) { return false; }
				if ( magic != MAGIC || trailer->magic != magic ) { return false; }
			}
			if ( !IsUsed() )
			{
				if ( ((addr_t) nextunused) & (ALIGNMENT-1UL) ) { return false; }
				if ( ((addr_t) trailer->prevunused) & (ALIGNMENT-1UL) ) { return false; }
				if ( nextunused && !IsGoodHeapPointer(nextunused->GetTrailer(),
				                                      sizeof(Trailer)) )
					return false;
				if ( nextunused && nextunused->GetTrailer()->prevunused != this ) { return false; }

				if ( trailer->prevunused )
				{
					if ( !IsGoodHeapPointer(trailer->prevunused,
					                        sizeof(*trailer->prevunused)) )
						return false;
					if ( bins[binindex] == this ) { return false; }
					if ( trailer->prevunused->nextunused != this ) { return false; }
				}
				if ( !trailer->prevunused )
				{
					if ( bins[binindex] != this ) { return false; }
					if ( !(bincontainschunks & (1UL << binindex)) ) { return false; }
				}
			}
			return true;
		}

		void InsertChunk(Chunk* chunk)
		{
			// Insert the chunk into the right bin.
			size_t binindex = BSR(chunk->size);
			chunk->GetTrailer()->prevunused = NULL;
			chunk->nextunused = bins[binindex];
			if ( chunk->nextunused )
			{
				ASSERT(chunk->nextunused->IsSane());
				chunk->nextunused->GetTrailer()->prevunused = chunk;
			}
			bins[binindex] = chunk;
			bincontainschunks |= (1UL << binindex);
			ASSERT(chunk->IsSane());
		}

		bool ValidateHeap()
		{
			bool foundbin[NUMBINS];
			for ( size_t i = 0; i < NUMBINS; i++ ) { foundbin[i] = false; }

			#ifdef HEAP_GROWS_DOWNWARDS
			Chunk* chunk = (Chunk*) (wilderness + wildernesssize);
			while ( (addr_t) chunk < heapstart )
			#else
			Chunk* chunk = (Chunk*) heapstart;
			while ( (addr_t) chunk < wilderness - wildernesssize )
			#endif
			{
				size_t timesfound = 0;
				for ( size_t i = 0; i < NUMBINS; i++ )
				{
					if ( chunk == bins[i] ) { foundbin[i] = true; timesfound++; }
				}
				if ( 1 < timesfound ) { return false; }

				if ( !chunk->IsSane() ) { return false; }
				chunk = chunk->RightNeighbor();
			}

			for ( size_t i = 0; i < NUMBINS; i++ )
			{
				if ( !bins[i] )
				{
					if ( foundbin[i] ) { return false; }
					continue;
				}
				if ( !foundbin[i] ) { return false; }
				if ( !bins[i]->IsSane() ) { return false; }
			}

			return true;
		}

		//
		// This is where the actual memory allocation algorithm starts.
		//

		void Init()
		{
			heapstart = GetHeapStart();
			heapmaxsize = GetHeapMaxSize();
			heapsize = 0;
			wilderness = heapstart;
			wildernesssize = 0;
			for ( size_t i = 0; i < NUMBINS; i++ ) { bins[i] = NULL; }
			bincontainschunks = 0;
			#ifdef SORTIX_KERNEL
			heaplock = Sortix::KTHREAD_MUTEX_INITIALIZER;
			#endif
		}

		// Attempts to expand the wilderness such that it contains at least
		// bytesneeded bytes. This is done by mapping new pages into the
		// virtual address space.
		bool ExpandWilderness(size_t bytesneeded)
		{
			if ( bytesneeded <= wildernesssize ) { return true; }

			bytesneeded -= wildernesssize;

			// Align the increase on page boundaries.
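			// For example, with PAGESIZE == 4096 a request for 5000 additional
			// bytes is rounded up to 8192, so the wilderness always grows by
			// whole pages.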
			const size_t PAGEMASK = ~(PAGESIZE - 1UL);
			bytesneeded = ( bytesneeded + PAGESIZE - 1UL ) & PAGEMASK;

			ASSERT(bytesneeded >= PAGESIZE);

			// TODO: Overflow MAY happen here!
			if ( heapmaxsize <= heapsize + wildernesssize + bytesneeded )
			{
				Error::Set(ENOMEM);
				return false;
			}

			#ifdef HEAP_GROWS_DOWNWARDS
			addr_t newwilderness = wilderness - bytesneeded;
			#else
			addr_t newwilderness = wilderness + bytesneeded;
			#endif

			// Attempt to map pages so our wilderness grows.
			if ( !ExtendHeap(bytesneeded) ) { return false; }

			wildernesssize += bytesneeded;
			wilderness = newwilderness;

			return true;
		}

		DUAL_FUNCTION(void*, malloc, Allocate, (size_t size))
		{
			#ifdef SORTIX_KERNEL
			Sortix::ScopedLock scopedlock(&heaplock);
			#endif

			#if 2 <= PARANOIA
			ASSERT(ValidateHeap());
			#endif

			// The size field keeps both the allocation and meta information.
			size += OVERHEAD;

			// Round up to nearest alignment.
			size = (size + ALIGNMENT - 1UL) & (~(ALIGNMENT-1UL));

			// Find the index of the smallest usable bin.
			size_t minbinindex = BSR(size-1UL)+1UL;

			// Make a bitmask that filters away all bins that are too small.
			size_t minbinmask = ~((1UL << minbinindex) - 1UL);

			// Figure out which bins are usable for our chunk.
			size_t availablebins = bincontainschunks & minbinmask;
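
			// Worked example (illustrative): on a build where OVERHEAD is 32
			// and ALIGNMENT is 16, a malloc(100) request becomes size == 144
			// after adding the overhead and rounding up. BSR(143)+1 == 8, so
			// only bins 8 and above (chunks of at least 256 bytes) are
			// considered, which is what guarantees the chosen chunk is large
			// enough.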

			if ( availablebins )
			{
				// Find the smallest available bin.
				size_t binindex = BSF(availablebins);

				Chunk* chunk = bins[binindex];
				ASSERT(chunk->IsSane());
				bins[binindex] = chunk->nextunused;

				size_t binsize = 1UL << binindex;

				// Mark the bin as empty if we emptied it.
				if ( !bins[binindex] )
				{
					bincontainschunks ^= binsize;
				}
				else
				{
					Trailer* trailer = bins[binindex]->GetTrailer();
					trailer->prevunused = NULL;
				}

				ASSERT(!bins[binindex] || bins[binindex]->IsSane());

				// If we don't use the entire chunk.
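				// The remainder is only split off when it can hold at least a
				// Chunk header and a Trailer of its own; otherwise the extra
				// bytes simply stay inside the returned chunk as slack.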
				if ( OVERHEAD <= binsize - size )
				{
					size_t left = binsize - size;
					chunk->size -= left;
					chunk->GetTrailer()->size = chunk->size;

					size_t leftbinindex = BSR(left);
					Chunk* leftchunk = chunk->RightNeighbor();
					leftchunk->size = left;
					Trailer* lefttrailer = leftchunk->GetTrailer();
					lefttrailer->size = left;

					InsertChunk(leftchunk);
				}

				chunk->magic = MAGIC;
				chunk->GetTrailer()->magic = MAGIC;

				#if 2 <= PARANOIA
				ASSERT(ValidateHeap());
				#endif

				addr_t result = ((addr_t) chunk) + sizeof(Chunk);
				return (void*) result;
			}

			// If no bins are available, try to allocate from the wilderness.

			// Check if the wilderness can meet our requirements.
			if ( wildernesssize < size && !ExpandWilderness(size) )
			{
				Error::Set(ENOMEM);
				return NULL;
			}

			// Carve a new chunk out of the wilderness and initialize it.
			#ifdef HEAP_GROWS_DOWNWARDS
			Chunk* chunk = (Chunk*) (wilderness + wildernesssize - size);
			#else
			Chunk* chunk = (Chunk*) (wilderness - wildernesssize);
			#endif
			ASSERT(size <= wildernesssize);
			wildernesssize -= size;
			heapsize += size;
			ASSERT(IsGoodHeapPointer(chunk, sizeof(*chunk)));
			chunk->size = size;
			Trailer* trailer = chunk->GetTrailer();
			ASSERT(IsGoodHeapPointer(trailer, sizeof(*trailer)));
			trailer->size = size;
			chunk->magic = MAGIC;
			trailer->magic = MAGIC;

			#if 2 <= PARANOIA
			ASSERT(ValidateHeap());
			#endif

			addr_t result = ((addr_t) chunk) + sizeof(Chunk);
			return (void*) result;
		}

		bool IsLeftmostChunk(Chunk* chunk)
		{
			#ifdef HEAP_GROWS_DOWNWARDS
			return (addr_t) chunk <= wilderness + wildernesssize;
			#else
			return heapstart <= (addr_t) chunk;
			#endif
		}

		bool IsRightmostChunk(Chunk* chunk)
		{
			#ifdef HEAP_GROWS_DOWNWARDS
			return heapstart <= (addr_t) chunk + chunk->size;
			#else
			return heapstart + heapsize <= (addr_t) chunk + chunk->size;
			#endif
		}

		// Removes a chunk from its bin.
		void UnlinkChunk(Chunk* chunk)
		{
			ASSERT(chunk->IsSane());
			Trailer* trailer = chunk->GetTrailer();
			if ( trailer->prevunused )
			{
				ASSERT(trailer->prevunused->IsSane());
				trailer->prevunused->nextunused = chunk->nextunused;
				if ( chunk->nextunused )
				{
					ASSERT(chunk->nextunused->IsSane());
					chunk->nextunused->GetTrailer()->prevunused = trailer->prevunused;
				}
			}
			else
			{
				if ( chunk->nextunused )
				{
					ASSERT(chunk->nextunused->IsSane());
					chunk->nextunused->GetTrailer()->prevunused = NULL;
				}
				size_t binindex = BSR(chunk->size);
				ASSERT(bins[binindex] == chunk);
				bins[binindex] = chunk->nextunused;
				if ( !bins[binindex] ) { bincontainschunks ^= 1UL << binindex; }
				else { ASSERT(bins[binindex]->IsSane()); }
			}
		}

		// Transforms a chunk and its neighbors into a single chunk if possible.
		void UnifyNeighbors(Chunk** chunk)
		{
			if ( !IsLeftmostChunk(*chunk) )
			{
				Chunk* neighbor = (*chunk)->LeftNeighbor();
				if ( !neighbor->IsUsed() )
				{
					size_t size = neighbor->size;
					size_t chunksize = (*chunk)->size;
					UnlinkChunk(neighbor);
					*chunk = neighbor;
					(*chunk)->size = size + chunksize;
					(*chunk)->GetTrailer()->size = (*chunk)->size;
				}
			}

			if ( !IsRightmostChunk(*chunk) )
			{
				Chunk* neighbor = (*chunk)->RightNeighbor();
				if ( !neighbor->IsUsed() )
				{
					UnlinkChunk(neighbor);
					(*chunk)->size += neighbor->size;
					(*chunk)->GetTrailer()->size = (*chunk)->size;
				}
			}
		}

		DUAL_FUNCTION(void, free, Free, (void* addr))
		{
			#ifdef SORTIX_KERNEL
			Sortix::ScopedLock scopedlock(&heaplock);
			#endif

			#if 2 <= PARANOIA
			ASSERT(ValidateHeap());
			#endif

			if ( !addr ) { return; }
			Chunk* chunk = (Chunk*) ((addr_t) addr - sizeof(Chunk));
			ASSERT(chunk->IsUsed());
			ASSERT(chunk->IsSane());

			UnifyNeighbors(&chunk);

			#ifdef HEAP_GROWS_DOWNWARDS
			bool nexttowilderness = IsLeftmostChunk(chunk);
			#else
			bool nexttowilderness = IsRightmostChunk(chunk);
			#endif

			// If possible, let the wilderness regain the memory.
			if ( nexttowilderness )
			{
				heapsize -= chunk->size;
				wildernesssize += chunk->size;
				return;
			}

			InsertChunk(chunk);

			#if 2 <= PARANOIA
			ASSERT(ValidateHeap());
			#endif
		}

		extern "C" void* calloc(size_t nmemb, size_t size)
		{
			size_t total = nmemb * size;
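			// Note: nmemb * size can overflow for huge arguments; a stricter
			// implementation would detect that and fail with ENOMEM first.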
			void* result = Allocate(total);
			if ( !result ) { return NULL; }
			Memory::Set(result, 0, total);
			return result;
		}

		// TODO: Implement this function properly.
		extern "C" void* realloc(void* ptr, size_t size)
		{
			if ( !ptr ) { return Allocate(size); }
			Chunk* chunk = (Chunk*) ((addr_t) ptr - sizeof(Chunk));
			ASSERT(chunk->IsUsed());
			ASSERT(chunk->IsSane());
			size_t allocsize = chunk->size - OVERHEAD;
			if ( size < allocsize ) { return ptr; }
			void* newptr = Allocate(size);
			if ( !newptr ) { return NULL; }
			Memory::Copy(newptr, ptr, allocsize);
			Free(ptr);
			return newptr;
		}
	}
}

void* operator new(size_t Size) { return Maxsi::Memory::Allocate(Size); }
void* operator new[](size_t Size) { return Maxsi::Memory::Allocate(Size); }
void operator delete (void* Addr) { return Maxsi::Memory::Free(Addr); }
void operator delete[](void* Addr) { return Maxsi::Memory::Free(Addr); }