Maintain counts of physical frames used for particular purposes.

commit 749d123331 (parent 5f2106f512)
10 changed files with 100 additions and 61 deletions
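The change threads an `enum page_usage` tag through the physical page allocator: every `Page::Get`/`Page::Put` and `Memory::MapRange`/`Memory::UnmapRange` call now states what the frame is for (paging structures, kernel heap, filesystem cache, user-space memory, and so on), and the allocator keeps a per-purpose count of frames in use.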

@@ -159,7 +159,7 @@ addr_t Construct32(Process* process, const uint8_t* file, size_t filelen,
 		assert(process == CurrentProcess());
 
-		if ( !Memory::MapRange(segment.addr, segment.size, prot) )
+		if ( !Memory::MapRange(segment.addr, segment.size, prot, PAGE_USAGE_USER_SPACE) )
 		{
 			kthread_mutex_unlock(&process->segment_lock);
 			process->ResetAddressSpace();
@@ -168,7 +168,7 @@ addr_t Construct32(Process* process, const uint8_t* file, size_t filelen,
 		if ( !AddSegment(process, &segment) )
 		{
-			Memory::UnmapRange(segment.addr, segment.size);
+			Memory::UnmapRange(segment.addr, segment.size, PAGE_USAGE_USER_SPACE);
 			kthread_mutex_unlock(&process->segment_lock);
 			process->ResetAddressSpace();
 			return 0;
@@ -368,7 +368,7 @@ addr_t Construct64(Process* process, const uint8_t* file, size_t filelen,
 		assert(process == CurrentProcess());
 
-		if ( !Memory::MapRange(segment.addr, segment.size, prot) )
+		if ( !Memory::MapRange(segment.addr, segment.size, prot, PAGE_USAGE_USER_SPACE) )
 		{
 			kthread_mutex_unlock(&process->segment_lock);
 			process->ResetAddressSpace();
@@ -377,7 +377,7 @@ addr_t Construct64(Process* process, const uint8_t* file, size_t filelen,
 		if ( !AddSegment(process, &segment) )
 		{
-			Memory::UnmapRange(segment.addr, segment.size);
+			Memory::UnmapRange(segment.addr, segment.size, PAGE_USAGE_USER_SPACE);
 			kthread_mutex_unlock(&process->segment_lock);
 			process->ResetAddressSpace();
 			return 0;

@@ -127,7 +127,7 @@ void BlockCache::ReleaseBlock(BlockCacheBlock* block)
 	blocks_allocated--;
 	uint8_t* block_data = BlockDataUnlocked(block);
 	addr_t block_data_addr = Memory::Unmap((addr_t) block_data);
-	Page::Put(block_data_addr);
+	Page::Put(block_data_addr, PAGE_USAGE_FILESYSTEM_CACHE);
 	// TODO: We leak this block's meta information here. Rather, we should
 	// put this block into a list of non-present blocks so we can reuse it
 	// later and reallocate a physical frame for it - then we will just
@@ -215,7 +215,7 @@ bool BlockCache::AddArea()
 		goto cleanup_done;
 	if ( !(area->blocks = new BlockCacheBlock[blocks_per_area]) )
 		goto cleanup_addralloc;
-	if ( !Memory::MapRange(area->addralloc.from, area->addralloc.size, prot) )
+	if ( !Memory::MapRange(area->addralloc.from, area->addralloc.size, prot, PAGE_USAGE_FILESYSTEM_CACHE) )
 		goto cleanup_blocks;
 	Memory::Flush();
 	blocks_allocated += blocks_per_area;

@@ -36,6 +36,20 @@ namespace Sortix {
 
 class Process;
 
+enum page_usage
+{
+	PAGE_USAGE_OTHER,
+	PAGE_USAGE_PHYSICAL,
+	PAGE_USAGE_PAGING_OVERHEAD,
+	PAGE_USAGE_KERNEL_HEAP,
+	PAGE_USAGE_FILESYSTEM_CACHE,
+	PAGE_USAGE_USER_SPACE,
+	PAGE_USAGE_EXECUTE,
+	PAGE_USAGE_DRIVER,
+	PAGE_USAGE_NUM_KINDS,
+	PAGE_USAGE_WASNT_ALLOCATED,
+};
+
 } // namespace Sortix
 
 namespace Sortix {
@@ -45,12 +59,12 @@ bool Reserve(size_t* counter, size_t amount);
 bool ReserveUnlocked(size_t* counter, size_t amount);
 bool Reserve(size_t* counter, size_t least, size_t ideal);
 bool ReserveUnlocked(size_t* counter, size_t least, size_t ideal);
-addr_t GetReserved(size_t* counter);
-addr_t GetReservedUnlocked(size_t* counter);
-addr_t Get();
-addr_t GetUnlocked();
-void Put(addr_t page);
-void PutUnlocked(addr_t page);
+addr_t GetReserved(size_t* counter, enum page_usage usage);
+addr_t GetReservedUnlocked(size_t* counter, enum page_usage usage);
+addr_t Get(enum page_usage usage);
+addr_t GetUnlocked(enum page_usage usage);
+void Put(addr_t page, enum page_usage usage);
+void PutUnlocked(addr_t page, enum page_usage usage);
 void Lock();
 void Unlock();
@@ -87,8 +101,8 @@ int ProvidedProtection(int prot);
 void PageProtect(addr_t mapto, int protection);
 void PageProtectAdd(addr_t mapto, int protection);
 void PageProtectSub(addr_t mapto, int protection);
-bool MapRange(addr_t where, size_t bytes, int protection);
-bool UnmapRange(addr_t where, size_t bytes);
+bool MapRange(addr_t where, size_t bytes, int protection, enum page_usage usage);
+bool UnmapRange(addr_t where, size_t bytes, enum page_usage usage);
 void Statistics(size_t* amountused, size_t* totalmem);
 addr_t GetKernelStack();
 size_t GetKernelStackSize();
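The header above defines the contract: a frame is charged to a usage category when allocated and credited back when freed, so the tag passed to `Page::Put` must match the one given to `Page::Get`. A minimal sketch of a call site under the new API; the helper functions are hypothetical and the header path is assumed, only the `Page` API and the `PAGE_USAGE_DRIVER` tag come from this commit:

```cpp
#include <sortix/kernel/memorymanagement.h> // assumed header location

namespace Sortix {

// Hypothetical example, not part of this commit: allocate one physical
// frame for a device and charge it to the driver category.
addr_t AllocateDriverFrame()
{
	addr_t frame = Page::Get(PAGE_USAGE_DRIVER);
	if ( !frame )
		return 0; // out of memory, errno is ENOMEM
	return frame;
}

// Free the frame with the same tag it was allocated with, otherwise the
// per-usage counters drift out of sync.
void FreeDriverFrame(addr_t frame)
{
	Page::Put(frame, PAGE_USAGE_DRIVER);
}

} // namespace Sortix
```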

@@ -255,7 +255,7 @@ static bool ExtractDir(struct initrd_context* ctx, initrd_inode_t* inode, Ref<De
 	{
 		uintptr_t size_aligned = Page::AlignDown(size - from_distance);
 		for ( size_t i = 0; i < size_aligned; i += Page::Size() )
-			Page::Put(Memory::Unmap(from_aligned + i));
+			Page::Put(Memory::Unmap(from_aligned + i), PAGE_USAGE_WASNT_ALLOCATED);
 		Memory::Flush();
 	}
 }
@@ -422,7 +422,7 @@ bool ExtractFromPhysicalInto(addr_t physaddr, size_t size, Ref<Descriptor> desc)
 		if ( !Memory::LookUp(mapat + i, NULL, NULL) )
 			continue;
 		addr_t addr = Memory::Unmap(mapat + i);
-		Page::Put(addr);
+		Page::Put(addr, PAGE_USAGE_WASNT_ALLOCATED);
 	}
 	Memory::Flush();
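The `PAGE_USAGE_WASNT_ALLOCATED` tag covers a special case: the initrd frames were handed over by the bootloader rather than drawn from the page allocator, so no usage counter was ever incremented for them. The tag is deliberately placed after `PAGE_USAGE_NUM_KINDS` in the enum, so the accounting guard (`if ( PAGE_USAGE_NUM_KINDS <= usage ) return;`, see below) ignores such frames while `Page::Put` still pushes them onto the free stack.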

@@ -76,7 +76,7 @@ void UnmapMemory(Process* process, uintptr_t addr, size_t size)
 	{
 		uintptr_t conflict_offset = (uintptr_t) conflict - (uintptr_t) process->segments;
 		size_t conflict_index = conflict_offset / sizeof(struct segment);
-		Memory::UnmapRange(conflict->addr, conflict->size);
+		Memory::UnmapRange(conflict->addr, conflict->size, PAGE_USAGE_USER_SPACE);
 		Memory::Flush();
 		if ( conflict_index + 1 == process->segments_used )
 		{
@@ -92,7 +92,7 @@ void UnmapMemory(Process* process, uintptr_t addr, size_t size)
 		// Delete the middle of the segment if covered there by our request.
 		if ( conflict->addr < addr && addr + size - conflict->addr <= conflict->size )
 		{
-			Memory::UnmapRange(addr, size);
+			Memory::UnmapRange(addr, size, PAGE_USAGE_USER_SPACE);
 			Memory::Flush();
 			struct segment right_segment;
 			right_segment.addr = addr + size;
@@ -109,7 +109,7 @@ void UnmapMemory(Process* process, uintptr_t addr, size_t size)
 		// Delete the part of the segment covered partially from the left.
 		if ( addr <= conflict->addr )
 		{
-			Memory::UnmapRange(conflict->addr, addr + size - conflict->addr);
+			Memory::UnmapRange(conflict->addr, addr + size - conflict->addr, PAGE_USAGE_USER_SPACE);
 			Memory::Flush();
 			conflict->size = conflict->addr + conflict->size - (addr + size);
 			conflict->addr = addr + size;
@@ -119,7 +119,7 @@ void UnmapMemory(Process* process, uintptr_t addr, size_t size)
 		// Delete the part of the segment covered partially from the right.
 		if ( conflict->addr + size <= addr + size )
 		{
-			Memory::UnmapRange(addr, addr + conflict->size + conflict->addr);
+			Memory::UnmapRange(addr, addr + conflict->size + conflict->addr, PAGE_USAGE_USER_SPACE);
 			Memory::Flush();
 			conflict->size -= conflict->size + conflict->addr;
 			continue;
@@ -232,13 +232,13 @@ bool MapMemory(Process* process, uintptr_t addr, size_t size, int prot)
 	new_segment.size = size;
 	new_segment.prot = prot;
 
-	if ( !MapRange(new_segment.addr, new_segment.size, new_segment.prot) )
+	if ( !MapRange(new_segment.addr, new_segment.size, new_segment.prot, PAGE_USAGE_USER_SPACE) )
 		return false;
 	Memory::Flush();
 
 	if ( !AddSegment(process, &new_segment) )
 	{
-		UnmapRange(new_segment.addr, new_segment.size);
+		UnmapRange(new_segment.addr, new_segment.size, PAGE_USAGE_USER_SPACE);
 		Memory::Flush();
 		return false;
 	}

@@ -381,7 +381,7 @@ void Process::ResetAddressSpace()
 	assert(Memory::GetAddressSpace() == addrspace);
 
 	for ( size_t i = 0; i < segments_used; i++ )
-		Memory::UnmapRange(segments[i].addr, segments[i].size);
+		Memory::UnmapRange(segments[i].addr, segments[i].size, PAGE_USAGE_USER_SPACE);
 
 	Memory::Flush();
@@ -1632,7 +1632,7 @@ static void* sys_sbrk(intptr_t increment)
 		if ( heap_segment->size < abs_amount )
 			abs_amount = heap_segment->size;
 		uintptr_t new_end = heap_segment->addr + heap_segment->size - abs_amount;
-		Memory::UnmapRange(new_end, abs_amount);
+		Memory::UnmapRange(new_end, abs_amount, PAGE_USAGE_USER_SPACE);
 		heap_segment->size -= abs_amount;
 		// TODO: How do we handle that the heap shrinks to 0 bytes?
 	}
@@ -1652,7 +1652,7 @@ static void* sys_sbrk(intptr_t increment)
 			return errno = ENOMEM, (void*) -1UL;
 		if ( FindOverlappingSegment(process, &growth) )
 			return errno = ENOMEM, (void*) -1UL;
-		if ( !Memory::MapRange(growth.addr, growth.size, growth.prot) )
+		if ( !Memory::MapRange(growth.addr, growth.size, growth.prot, PAGE_USAGE_USER_SPACE) )
 			return errno = ENOMEM, (void*) -1UL;
 		heap_segment->size += growth.size;
 	}

@@ -131,7 +131,7 @@ void RecursiveFreeUserspacePages(size_t level, size_t offset)
 			RecursiveFreeUserspacePages(level-1, offset * ENTRIES + i);
 		addr_t addr = pml->entry[i] & PML_ADDRESS;
 		// No need to unmap the page, we just need to mark it as unused.
-		Page::PutUnlocked(addr);
+		Page::PutUnlocked(addr, PAGE_USAGE_PAGING_OVERHEAD);
 	}
 }
@@ -170,12 +170,12 @@ void DestroyAddressSpace(addr_t fallback)
 	Page::Unlock();
 
 	// These are safe to free since we switched address space.
-	Page::Put(fractal3 & PML_ADDRESS);
-	Page::Put(fractal2 & PML_ADDRESS);
-	Page::Put(fractal1 & PML_ADDRESS);
-	Page::Put(fork2 & PML_ADDRESS);
-	Page::Put(fork1 & PML_ADDRESS);
-	Page::Put(dir & PML_ADDRESS);
+	Page::Put(fractal3 & PML_ADDRESS, PAGE_USAGE_PAGING_OVERHEAD);
+	Page::Put(fractal2 & PML_ADDRESS, PAGE_USAGE_PAGING_OVERHEAD);
+	Page::Put(fractal1 & PML_ADDRESS, PAGE_USAGE_PAGING_OVERHEAD);
+	Page::Put(fork2 & PML_ADDRESS, PAGE_USAGE_PAGING_OVERHEAD);
+	Page::Put(fork1 & PML_ADDRESS, PAGE_USAGE_PAGING_OVERHEAD);
+	Page::Put(dir & PML_ADDRESS, PAGE_USAGE_PAGING_OVERHEAD);
 }
 
 const size_t KERNEL_STACK_SIZE = 256UL * 1024UL;

@@ -54,6 +54,7 @@ size_t stackused;
 size_t stackreserved;
 size_t stacklength;
 size_t totalmem;
+size_t page_usage_counts[PAGE_USAGE_NUM_KINDS];
 kthread_mutex_t pagelock;
 
 } // namespace Page
@@ -234,7 +235,7 @@ void AllocateKernelPMLs()
 		if ( pml->entry[i] & PML_PRESENT )
 			continue;
 
-		addr_t page = Page::Get();
+		addr_t page = Page::Get(PAGE_USAGE_PAGING_OVERHEAD);
 		if ( !page )
 			Panic("out of memory allocating boot PMLs");
@@ -253,11 +254,28 @@ void AllocateKernelPMLs()
 namespace Sortix {
 namespace Page {
 
+void PageUsageRegisterUse(addr_t where, enum page_usage usage)
+{
+	if ( PAGE_USAGE_NUM_KINDS <= usage )
+		return;
+	(void) where;
+	page_usage_counts[usage]++;
+}
+
+void PageUsageRegisterFree(addr_t where, enum page_usage usage)
+{
+	if ( PAGE_USAGE_NUM_KINDS <= usage )
+		return;
+	(void) where;
+	assert(page_usage_counts[usage] != 0);
+	page_usage_counts[usage]--;
+}
+
 void ExtendStack()
 {
 	// This call will always succeed, if it didn't, then the stack
 	// wouldn't be full, and thus this function won't be called.
-	addr_t page = GetUnlocked();
+	addr_t page = GetUnlocked(PAGE_USAGE_PHYSICAL);
 
 	// This call will also succeed, since there are plenty of physical
 	// pages available and it might need some.
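`page_usage_counts` is guarded by `pagelock`, so any reader must take the lock. A sketch of how the counters might be surfaced; the reporting function and name table are hypothetical, and `Log::PrintF` is assumed to be available as in the rest of the kernel:

```cpp
// Hypothetical reporting helper, not part of this commit.
static const char* const page_usage_names[PAGE_USAGE_NUM_KINDS] =
{
	"other", "physical", "paging overhead", "kernel heap",
	"filesystem cache", "user-space", "execute", "driver",
};

void ReportPageUsage()
{
	ScopedLock lock(&pagelock); // counters are protected by the page lock
	for ( size_t i = 0; i < PAGE_USAGE_NUM_KINDS; i++ )
		Log::PrintF("%s: %zu frames\n",
		            page_usage_names[i], page_usage_counts[i]);
}
```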
@@ -329,7 +347,7 @@ bool Reserve(size_t* counter, size_t amount)
 	return ReserveUnlocked(counter, amount);
 }
 
-addr_t GetReservedUnlocked(size_t* counter)
+addr_t GetReservedUnlocked(size_t* counter, enum page_usage usage)
 {
 	if ( !*counter )
 		return 0;
@@ -338,32 +356,34 @@ addr_t GetReservedUnlocked(size_t* counter)
 	assert(result == AlignDown(result));
 	stackreserved--;
 	(*counter)--;
+	PageUsageRegisterUse(result, usage);
 	return result;
 }
 
-addr_t GetReserved(size_t* counter)
+addr_t GetReserved(size_t* counter, enum page_usage usage)
 {
 	ScopedLock lock(&pagelock);
-	return GetReservedUnlocked(counter);
+	return GetReservedUnlocked(counter, usage);
 }
 
-addr_t GetUnlocked()
+addr_t GetUnlocked(enum page_usage usage)
 {
 	assert(stackreserved <= stackused);
 	if ( unlikely(stackreserved == stackused) )
 		return errno = ENOMEM, 0;
 	addr_t result = STACK[--stackused];
 	assert(result == AlignDown(result));
+	PageUsageRegisterUse(result, usage);
 	return result;
 }
 
-addr_t Get()
+addr_t Get(enum page_usage usage)
 {
 	ScopedLock lock(&pagelock);
-	return GetUnlocked();
+	return GetUnlocked(usage);
 }
 
-void PutUnlocked(addr_t page)
+void PutUnlocked(addr_t page, enum page_usage usage)
 {
 	assert(page == AlignDown(page));
 	if ( unlikely(stackused == stacklength) )
@@ -376,12 +396,13 @@ void PutUnlocked(addr_t page)
 		ExtendStack();
 	}
 	STACK[stackused++] = page;
+	PageUsageRegisterFree(page, usage);
 }
 
-void Put(addr_t page)
+void Put(addr_t page, enum page_usage usage)
 {
 	ScopedLock lock(&pagelock);
-	PutUnlocked(page);
+	PutUnlocked(page, usage);
 }
 
 void Lock()
@@ -507,18 +528,18 @@ void Flush()
 	asm volatile ( "mov %0, %%cr3" : : "r"(previous) );
 }
 
-bool MapRange(addr_t where, size_t bytes, int protection)
+bool MapRange(addr_t where, size_t bytes, int protection, enum page_usage usage)
 {
 	for ( addr_t page = where; page < where + bytes; page += 4096UL )
 	{
-		addr_t physicalpage = Page::Get();
+		addr_t physicalpage = Page::Get(usage);
 		if ( physicalpage == 0 )
 		{
 			while ( where < page )
 			{
 				page -= 4096UL;
 				physicalpage = Unmap(page);
-				Page::Put(physicalpage);
+				Page::Put(physicalpage, usage);
 			}
 			return false;
 		}
@@ -529,12 +550,12 @@ bool MapRange(addr_t where, size_t bytes, int protection)
 	return true;
 }
 
-bool UnmapRange(addr_t where, size_t bytes)
+bool UnmapRange(addr_t where, size_t bytes, enum page_usage usage)
 {
 	for ( addr_t page = where; page < where + bytes; page += 4096UL )
 	{
 		addr_t physicalpage = Unmap(page);
-		Page::Put(physicalpage);
+		Page::Put(physicalpage, usage);
 	}
 	return true;
 }
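`MapRange` charges every frame in the range to the caller's category and `UnmapRange` credits them back one `Page::Put` at a time, so a range must be torn down with the same tag it was set up with. A short illustrative sketch (the function and its use of the filesystem-cache tag are hypothetical):

```cpp
namespace Sortix {

// Hypothetical sketch, not part of this commit: map a scratch range, use
// it, and unmap it under one usage tag so the counters balance.
bool WithScratchRange(addr_t where, size_t bytes, int prot)
{
	if ( !Memory::MapRange(where, bytes, prot, PAGE_USAGE_FILESYSTEM_CACHE) )
		return false;
	Memory::Flush();
	// ... use the mapping ...
	Memory::UnmapRange(where, bytes, PAGE_USAGE_FILESYSTEM_CACHE);
	Memory::Flush();
	return true;
}

} // namespace Sortix
```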
@@ -564,7 +585,7 @@ static bool MapInternal(addr_t physical, addr_t mapto, int prot, addr_t extrafla
 		if ( !(entry & PML_PRESENT) )
 		{
 			// TODO: Possible memory leak when page allocation fails.
-			addr_t page = Page::Get();
+			addr_t page = Page::Get(PAGE_USAGE_PAGING_OVERHEAD);
 
 			if ( !page )
 				return false;
@@ -682,7 +703,9 @@ void ForkCleanup(size_t i, size_t level)
 			InvalidatePage(destaddr);
 			ForkCleanup(ENTRIES+1UL, level-1);
 		}
-		Page::Put(phys);
+		enum page_usage usage = 1 < level ? PAGE_USAGE_PAGING_OVERHEAD
+		                                  : PAGE_USAGE_USER_SPACE;
+		Page::Put(phys, usage);
 	}
 }
@@ -696,13 +719,15 @@ bool Fork(size_t level, size_t pmloffset)
 		addr_t entry = (PMLS[level] + pmloffset)->entry[i];
 
 		// Link the entry if it isn't supposed to be forked.
-		if ( !(entry & PML_FORK ) )
+		if ( !(entry & PML_PRESENT) || !(entry & PML_FORK ) )
 		{
 			destpml->entry[i] = entry;
 			continue;
 		}
 
-		addr_t phys = Page::Get();
+		enum page_usage usage = 1 < level ? PAGE_USAGE_PAGING_OVERHEAD
+		                                  : PAGE_USAGE_USER_SPACE;
+		addr_t phys = Page::Get(usage);
 		if ( unlikely(!phys) )
 		{
 			ForkCleanup(i, level);
@@ -723,7 +748,7 @@ bool Fork(size_t level, size_t pmloffset)
 	{
 		if ( !Fork(level-1, offset) )
 		{
-			Page::Put(phys);
+			Page::Put(phys, usage);
 			ForkCleanup(i, level);
 			return false;
 		}
@@ -756,12 +781,12 @@ bool Fork(addr_t dir, size_t level, size_t pmloffset)
 // Create an exact copy of the current address space.
 addr_t Fork()
 {
-	addr_t dir = Page::Get();
+	addr_t dir = Page::Get(PAGE_USAGE_PAGING_OVERHEAD);
 	if ( dir == 0 )
 		return 0;
 	if ( !Fork(dir, TOPPMLLEVEL, 0) )
 	{
-		Page::Put(dir);
+		Page::Put(dir, PAGE_USAGE_PAGING_OVERHEAD);
 		return 0;
 	}
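Two things happen in the fork path above. First, the usage tag is chosen by paging level: frames at levels above 1 hold page tables and are charged to `PAGE_USAGE_PAGING_OVERHEAD`, while level-1 entries are the forked user pages themselves and are charged to `PAGE_USAGE_USER_SPACE`; `ForkCleanup` makes the same choice so a failed fork credits the right counters. Second, the fork test is extended from `!(entry & PML_FORK)` to `!(entry & PML_PRESENT) || !(entry & PML_FORK)`, so entries that are not present are linked rather than forked.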

@@ -126,7 +126,7 @@ void RecursiveFreeUserspacePages(size_t level, size_t offset)
 			RecursiveFreeUserspacePages(level-1, offset * ENTRIES + i);
 		addr_t addr = pml->entry[i] & PML_ADDRESS;
 		// No need to unmap the page, we just need to mark it as unused.
-		Page::PutUnlocked(addr);
+		Page::PutUnlocked(addr, PAGE_USAGE_PAGING_OVERHEAD);
 	}
 }
@@ -161,8 +161,8 @@ void DestroyAddressSpace(addr_t fallback)
 	Page::Unlock();
 
 	// These are safe to free since we switched address space.
-	Page::Put(fractal1 & PML_ADDRESS);
-	Page::Put(dir & PML_ADDRESS);
+	Page::Put(fractal1 & PML_ADDRESS, PAGE_USAGE_PAGING_OVERHEAD);
+	Page::Put(dir & PML_ADDRESS, PAGE_USAGE_PAGING_OVERHEAD);
 }
 
 const size_t KERNEL_STACK_SIZE = 256UL * 1024UL;

@@ -80,7 +80,7 @@ static void FreeMemory(uintptr_t where, size_t bytes)
 	while ( bytes )
 	{
 		addr_t page = Sortix::Memory::Unmap(where);
-		Sortix::Page::Put(page);
+		Sortix::Page::Put(page, Sortix::PAGE_USAGE_KERNEL_HEAP);
 
 		bytes -= PAGESIZE;
 		where += PAGESIZE;
@@ -95,7 +95,7 @@ static bool AllocateMemory(uintptr_t where, size_t bytes)
 
 	while ( bytes )
 	{
-		addr_t page = Sortix::Page::Get();
+		addr_t page = Sortix::Page::Get(Sortix::PAGE_USAGE_KERNEL_HEAP);
 		if ( !page )
 		{
 			FreeMemory(where, pos-where);
@@ -104,7 +104,7 @@ static bool AllocateMemory(uintptr_t where, size_t bytes)
 
 		if ( !Sortix::Memory::Map(page, pos, PROT_KREAD | PROT_KWRITE) )
 		{
-			Sortix::Page::Put(page);
+			Sortix::Page::Put(page, Sortix::PAGE_USAGE_KERNEL_HEAP);
 			FreeMemory(where, pos-where);
 			return false;
 		}