/*
 * Copyright (c) 2011, 2012, 2014, 2015 Jonas 'Sortie' Termansen.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * x86-family/memorymanagement.cpp
 * Handles memory for the x86 family of architectures.
 */

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <sortix/mman.h>

#include <sortix/kernel/kernel.h>
#include <sortix/kernel/kthread.h>
#include <sortix/kernel/memorymanagement.h>
#include <sortix/kernel/panic.h>
#include <sortix/kernel/pat.h>
#include <sortix/kernel/syscall.h>

#include "multiboot.h"
#include "memorymanagement.h"
#include "msr.h"

namespace Sortix {

extern size_t end;

} // namespace Sortix

namespace Sortix {
namespace Page {

void InitPushRegion(addr_t position, size_t length);
size_t pagesnotonstack = 0;
size_t stackused = 0;
size_t stackreserved = 0;
size_t stacklength = 4096 / sizeof(addr_t);
size_t totalmem = 0;
size_t page_usage_counts[PAGE_USAGE_NUM_KINDS];
kthread_mutex_t pagelock = KTHREAD_MUTEX_INITIALIZER;

} // namespace Page
} // namespace Sortix

namespace Sortix {
namespace Memory {

addr_t PAT2PMLFlags[PAT_NUM];

static bool CheckUsedRange(addr_t test,
                           addr_t from_unaligned,
                           size_t size_unaligned,
                           size_t* dist_ptr)
{
	addr_t from = Page::AlignDown(from_unaligned);
	size_unaligned += from_unaligned - from;
	size_t size = Page::AlignUp(size_unaligned);
	if ( from <= test && test < from + size )
		return *dist_ptr = from + size - test, true;
	if ( test < from && from - test < *dist_ptr )
		*dist_ptr = from - test;
	return false;
}
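
// A worked example of the bookkeeping above (illustrative values, not from a
// real boot): with test = 0x5000, from_unaligned = 0x4800, and
// size_unaligned = 0x1000 on 4096-byte pages, the used range widens to
// [0x4000, 0x6000). The test address falls inside it, so *dist_ptr becomes
// 0x6000 - 0x5000 = 0x1000 and the caller skips a whole page before probing
// again. Had test been 0x2000, the range lies ahead and *dist_ptr would be
// capped at 0x4000 - 0x2000 = 0x2000, the gap up to the used range.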

static bool CheckUsedString(addr_t test,
                            const char* string,
                            size_t* dist_ptr)
{
	size_t size = strlen(string) + 1;
	return CheckUsedRange(test, (addr_t) string, size, dist_ptr);
}

static bool CheckUsedRanges(multiboot_info_t* bootinfo,
                            addr_t test,
                            size_t* dist_ptr)
{
	addr_t kernel_end = (addr_t) &end;
	if ( CheckUsedRange(test, 0, kernel_end, dist_ptr) )
		return true;

	if ( CheckUsedRange(test, (addr_t) bootinfo, sizeof(*bootinfo), dist_ptr) )
		return true;

	const char* cmdline = (const char*) (uintptr_t) bootinfo->cmdline;
	if ( CheckUsedString(test, cmdline, dist_ptr) )
		return true;

	size_t mods_size = bootinfo->mods_count * sizeof(struct multiboot_mod_list);
	if ( CheckUsedRange(test, bootinfo->mods_addr, mods_size, dist_ptr) )
		return true;

	struct multiboot_mod_list* modules =
		(struct multiboot_mod_list*) (uintptr_t) bootinfo->mods_addr;
	for ( uint32_t i = 0; i < bootinfo->mods_count; i++ )
	{
		struct multiboot_mod_list* module = &modules[i];
		assert(module->mod_start <= module->mod_end);
		size_t mod_size = module->mod_end - module->mod_start;
		if ( CheckUsedRange(test, module->mod_start, mod_size, dist_ptr) )
			return true;
		const char* mod_cmdline = (const char*) (uintptr_t) module->cmdline;
		if ( CheckUsedString(test, mod_cmdline, dist_ptr) )
			return true;
	}

	if ( CheckUsedRange(test, bootinfo->mmap_addr, bootinfo->mmap_length,
	                    dist_ptr) )
		return true;

	return false;
}

void Init(multiboot_info_t* bootinfo)
{
	if ( !(bootinfo->flags & MULTIBOOT_INFO_MEM_MAP) )
		Panic("The memory map flag wasn't set in the multiboot structure.");

	// If supported, set up the Page Attribute Table feature that allows
	// us to control the memory type (caching) of memory more precisely.
	if ( IsPATSupported() )
	{
		InitializePAT();
		for ( addr_t i = 0; i < PAT_NUM; i++ )
			PAT2PMLFlags[i] = EncodePATAsPMLFlag(i);
	}
	// Otherwise, reroute all requests to the backwards compatible scheme.
	// TODO: Not all early 32-bit x86 CPUs support these values.
	else
	{
		PAT2PMLFlags[PAT_UC] = PML_WRTHROUGH | PML_NOCACHE;
		PAT2PMLFlags[PAT_WC] = PML_WRTHROUGH | PML_NOCACHE; // Approx.
		PAT2PMLFlags[2] = 0; // No such flag.
		PAT2PMLFlags[3] = 0; // No such flag.
		PAT2PMLFlags[PAT_WT] = PML_WRTHROUGH;
		PAT2PMLFlags[PAT_WP] = PML_WRTHROUGH; // Approx.
		PAT2PMLFlags[PAT_WB] = 0;
		PAT2PMLFlags[PAT_UCM] = PML_NOCACHE;
	}

	typedef const multiboot_memory_map_t* mmap_t;

	// Loop over every detected memory region.
	for ( mmap_t mmap = (mmap_t) (addr_t) bootinfo->mmap_addr;
	      (addr_t) mmap < bootinfo->mmap_addr + bootinfo->mmap_length;
	      mmap = (mmap_t) ((addr_t) mmap + mmap->size + sizeof(mmap->size)) )
	{
		// Check that we can use this kind of RAM.
		if ( mmap->type != 1 )
			continue;

		// Truncate the memory area if needed.
		uint64_t mmap_addr = mmap->addr;
		uint64_t mmap_len = mmap->len;
#if defined(__i386__)
		if ( 0xFFFFFFFFULL < mmap_addr )
			continue;
		if ( 0xFFFFFFFFULL < mmap_addr + mmap_len )
			mmap_len = 0x100000000ULL - mmap_addr;
#endif

		// Properly page align the entry if needed.
		// TODO: Is the bootloader required to page align this? This could be
		//       raw BIOS data that might not be page aligned? But that would
		//       be a silly computer.
		addr_t base_unaligned = (addr_t) mmap_addr;
		addr_t base = Page::AlignUp(base_unaligned);
		if ( mmap_len < base - base_unaligned )
			continue;
		size_t length_unaligned = mmap_len - (base - base_unaligned);
		size_t length = Page::AlignDown(length_unaligned);
		if ( !length )
			continue;

		// Count the amount of usable RAM.
		Page::totalmem += length;

		// Give all the physical memory to the physical memory allocator
		// but make sure not to give it things we already use.
		addr_t processed = base;
		while ( processed < base + length )
		{
			size_t distance = base + length - processed;
			if ( !CheckUsedRanges(bootinfo, processed, &distance) )
				Page::InitPushRegion(processed, distance);
			processed += distance;
		}
	}

	// Prepare the non-forkable kernel PMLs such that forking the kernel
	// address space will always keep the kernel mapped.
	for ( size_t i = ENTRIES / 2; i < ENTRIES; i++ )
	{
		PML* const pml = PMLS[TOPPMLLEVEL];
		if ( pml->entry[i] & PML_PRESENT )
			continue;

		addr_t page = Page::Get(PAGE_USAGE_PAGING_OVERHEAD);
		if ( !page )
			Panic("Out of memory allocating boot PMLs.");

		pml->entry[i] = page | PML_WRITABLE | PML_PRESENT;

		// Invalidate the new PML and reset it to zeroes.
		addr_t pmladdr = (addr_t) (PMLS[TOPPMLLEVEL-1] + i);
		InvalidatePage(pmladdr);
		memset((void*) pmladdr, 0, sizeof(PML));
	}
}
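
// To make the kernel PML loop in Init concrete (illustrative, assuming x86_64
// where ENTRIES == 512 and TOPPMLLEVEL == 4): entries 256 through 511 of the
// top level PML cover the higher half of the virtual address space where the
// kernel lives. Preallocating them up front means every forked address space
// can link these entries by reference, so the kernel stays mapped in all
// processes without extra work at fork time.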

void Statistics(size_t* amountused, size_t* totalmem)
{
	size_t memfree = (Page::stackused - Page::stackreserved) << 12UL;
	size_t memused = Page::totalmem - memfree;
	if ( amountused )
		*amountused = memused;
	if ( totalmem )
		*totalmem = Page::totalmem;
}

} // namespace Memory
} // namespace Sortix

namespace Sortix {
namespace Page {

void PageUsageRegisterUse(addr_t where, enum page_usage usage)
{
	if ( PAGE_USAGE_NUM_KINDS <= usage )
		return;
	(void) where;
	page_usage_counts[usage]++;
}

void PageUsageRegisterFree(addr_t where, enum page_usage usage)
{
	if ( PAGE_USAGE_NUM_KINDS <= usage )
		return;
	(void) where;
	assert(page_usage_counts[usage] != 0);
	page_usage_counts[usage]--;
}

void ExtendStack()
{
	// This call will always succeed: if it couldn't, the stack wouldn't be
	// full, and this function wouldn't have been called.
	addr_t page = GetUnlocked(PAGE_USAGE_PHYSICAL);

	// This call will also succeed, since there are plenty of physical
	// pages available and it might need some.
	addr_t virt = (addr_t) (STACK + stacklength);
	if ( !Memory::Map(page, virt, PROT_KREAD | PROT_KWRITE) )
		Panic("Unable to extend page stack, which should have worked");

	// TODO: This may not be needed during the boot process!
	//Memory::InvalidatePage((addr_t) (STACK + stacklength));

	stacklength += 4096UL / sizeof(addr_t);
}
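
// Some illustrative arithmetic for the growth step above: each page appended
// to the stack holds 4096 / sizeof(addr_t) entries, i.e. 1024 on 32-bit x86
// or 512 on x86_64, and each entry tracks one free physical page. A single
// stack page therefore accounts for 4 MiB (or 2 MiB) of free memory, so the
// stack's own overhead stays around one part in a thousand (or five hundred).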

void InitPushRegion(addr_t position, size_t length)
{
	// Align our entries on page boundaries.
	addr_t newposition = Page::AlignUp(position);
	length = Page::AlignDown((position + length) - newposition);
	position = newposition;

	while ( length )
	{
		if ( unlikely(stackused == stacklength) )
		{
			if ( stackused == MAXSTACKLENGTH )
			{
				pagesnotonstack += length / 4096UL;
				return;
			}

			ExtendStack();
		}

		addr_t* stackentry = &(STACK[stackused++]);
		*stackentry = position;

		length -= 4096UL;
		position += 4096UL;
	}
}

bool ReserveUnlocked(size_t* counter, size_t least, size_t ideal)
{
	assert(least <= ideal);
	size_t available = stackused - stackreserved;
	if ( available < least )
		return errno = ENOMEM, false;
	if ( available < ideal )
		ideal = available;
	stackreserved += ideal;
	*counter += ideal;
	return true;
}
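
// For example (hypothetical numbers): with 9 unreserved pages on the stack,
// a request for at least 4 and ideally 16 pages succeeds and reserves all 9,
// while a request for at least 12 fails with ENOMEM and reserves nothing.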

bool Reserve(size_t* counter, size_t least, size_t ideal)
{
	ScopedLock lock(&pagelock);
	return ReserveUnlocked(counter, least, ideal);
}

bool ReserveUnlocked(size_t* counter, size_t amount)
{
	return ReserveUnlocked(counter, amount, amount);
}

bool Reserve(size_t* counter, size_t amount)
{
	ScopedLock lock(&pagelock);
	return ReserveUnlocked(counter, amount);
}

addr_t GetReservedUnlocked(size_t* counter, enum page_usage usage)
{
	if ( !*counter )
		return 0;
	assert(stackused); // After all, we did _reserve_ the memory.
	addr_t result = STACK[--stackused];
	assert(result == AlignDown(result));
	stackreserved--;
	(*counter)--;
	PageUsageRegisterUse(result, usage);
	return result;
}

addr_t GetReserved(size_t* counter, enum page_usage usage)
{
	ScopedLock lock(&pagelock);
	return GetReservedUnlocked(counter, usage);
}

addr_t GetUnlocked(enum page_usage usage)
{
	assert(stackreserved <= stackused);
	if ( unlikely(stackreserved == stackused) )
		return errno = ENOMEM, 0;
	addr_t result = STACK[--stackused];
	assert(result == AlignDown(result));
	PageUsageRegisterUse(result, usage);
	return result;
}

addr_t Get(enum page_usage usage)
{
	ScopedLock lock(&pagelock);
	return GetUnlocked(usage);
}

// TODO: This competes with the normal allocation for precious 32-bit pages, we
//       should use different pools for this, and preferably preallocate some
//       32-bit pages exclusively for driver usage. Also, get proper hardware
//       without these issues.
addr_t Get32BitUnlocked(enum page_usage usage)
{
	assert(stackreserved <= stackused);
	if ( unlikely(stackreserved == stackused) )
		return errno = ENOMEM, 0;
	for ( size_t ii = stackused; 0 < ii; ii-- )
	{
		size_t i = ii - 1;
		addr_t result = STACK[i];
		assert(result == AlignDown(result));
		if ( 4 < sizeof(void*) && UINT32_MAX < result )
			continue;
		if ( i + 1 != stackused )
		{
			STACK[i] = STACK[stackused - 1];
			STACK[stackused - 1] = result;
		}
		stackused--;
		PageUsageRegisterUse(result, usage);
		return result;
	}
	return errno = ENOMEM, 0;
}
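
// An illustrative trace of the removal above (hypothetical stack contents):
// with STACK = { A, B, C }, stackused == 3, and only B below 4 GiB, the scan
// finds B at i == 1, swaps it with the top entry C so the stack reads
// { A, C, B }, then shrinks stackused to 2. The surviving entries { A, C }
// stay contiguous while B is handed out, keeping ordinary pops O(1).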

addr_t Get32Bit(enum page_usage usage)
{
	ScopedLock lock(&pagelock);
	return Get32BitUnlocked(usage);
}

void PutUnlocked(addr_t page, enum page_usage usage)
{
	assert(page == AlignDown(page));
	if ( unlikely(stackused == stacklength) )
	{
		if ( stackused == MAXSTACKLENGTH )
		{
			pagesnotonstack++;
			return;
		}
		ExtendStack();
	}
	STACK[stackused++] = page;
	PageUsageRegisterFree(page, usage);
}

void Put(addr_t page, enum page_usage usage)
{
	ScopedLock lock(&pagelock);
	PutUnlocked(page, usage);
}

void Lock()
{
	kthread_mutex_lock(&pagelock);
}

void Unlock()
{
	kthread_mutex_unlock(&pagelock);
}

} // namespace Page
} // namespace Sortix

namespace Sortix {
namespace Memory {

addr_t ProtectionToPMLFlags(int prot)
{
	addr_t result = PML_NX;
	if ( prot & PROT_EXEC )
	{
		result |= PML_USERSPACE;
		result &= ~PML_NX;
	}
	if ( prot & PROT_READ )
		result |= PML_USERSPACE;
	if ( prot & PROT_WRITE )
		result |= PML_USERSPACE | PML_WRITABLE;
	if ( prot & PROT_KEXEC )
		result &= ~PML_NX;
	if ( prot & PROT_KREAD )
		result |= 0;
	if ( prot & PROT_KWRITE )
		result |= PML_WRITABLE;
	if ( prot & PROT_FORK )
		result |= PML_FORK;
	return result;
}

int PMLFlagsToProtection(addr_t flags)
{
	int prot = PROT_KREAD;
	if ( (flags & PML_USERSPACE) && !(flags & PML_NX) )
		prot |= PROT_EXEC;
	if ( (flags & PML_USERSPACE) )
		prot |= PROT_READ;
	if ( (flags & PML_USERSPACE) && (flags & PML_WRITABLE) )
		prot |= PROT_WRITE;
	if ( !(flags & PML_NX) )
		prot |= PROT_KEXEC;
	if ( flags & PML_WRITABLE )
		prot |= PROT_KWRITE;
	if ( flags & PML_FORK )
		prot |= PROT_FORK;
	return prot;
}

int ProvidedProtection(int prot)
{
	return PMLFlagsToProtection(ProtectionToPMLFlags(prot));
}
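
// A worked round trip through the two conversions above (illustrative):
// ProtectionToPMLFlags(PROT_READ | PROT_WRITE) yields
// PML_NX | PML_USERSPACE | PML_WRITABLE, and PMLFlagsToProtection of that
// yields PROT_READ | PROT_WRITE | PROT_KREAD | PROT_KWRITE. ProvidedProtection
// thus reports what the hardware will actually enforce: x86 paging cannot
// grant userspace write access without also letting the kernel read and
// write the page.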

bool LookUp(addr_t mapto, addr_t* physical, int* protection)
{
	// Translate the virtual address into PML indexes.
	const size_t MASK = (1<<TRANSBITS)-1;
	size_t pmlchildid[TOPPMLLEVEL + 1];
	for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
		pmlchildid[i] = mapto >> (12 + (i-1) * TRANSBITS) & MASK;

	int prot = PROT_USER | PROT_KERNEL | PROT_FORK;

	// For each PML level, make sure it exists.
	size_t offset = 0;
	for ( size_t i = TOPPMLLEVEL; i > 1; i-- )
	{
		size_t childid = pmlchildid[i];
		PML* pml = PMLS[i] + offset;

		addr_t entry = pml->entry[childid];
		if ( !(entry & PML_PRESENT) )
			return false;
		addr_t entryflags = entry & ~PML_ADDRESS;
		int entryprot = PMLFlagsToProtection(entryflags);
		prot &= entryprot;

		// Find the index of the next PML in the fractal mapped memory.
		offset = offset * ENTRIES + childid;
	}

	addr_t entry = (PMLS[1] + offset)->entry[pmlchildid[1]];
	if ( !(entry & PML_PRESENT) )
		return false;

	addr_t entryflags = entry & ~PML_ADDRESS;
	int entryprot = PMLFlagsToProtection(entryflags);
	prot &= entryprot;
	addr_t phys = entry & PML_ADDRESS;

	if ( physical )
		*physical = phys;
	if ( protection )
		*protection = prot;

	return true;
}
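
// A worked example of the index translation above (illustrative, assuming
// 32-bit x86 without PAE where TOPPMLLEVEL == 2 and TRANSBITS == 10): for
// mapto == 0x00403000, pmlchildid[2] == 0x00403000 >> 22 == 1 and
// pmlchildid[1] == (0x00403000 >> 12) & 0x3FF == 3, i.e. the fourth page
// table entry under the second page directory entry. On x86_64 the same
// scheme runs with TOPPMLLEVEL == 4 and TRANSBITS == 9.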

void InvalidatePage(addr_t /*addr*/)
{
	// TODO: Actually just call the instruction.
	Flush();
}

addr_t GetAddressSpace()
{
	addr_t result;
	asm ( "mov %%cr3, %0" : "=r"(result) );
	return result;
}

addr_t SwitchAddressSpace(addr_t addrspace)
{
	assert(Page::IsAligned(addrspace));

	addr_t previous = GetAddressSpace();
	asm volatile ( "mov %0, %%cr3" : : "r"(addrspace) );
	return previous;
}

void Flush()
{
	addr_t previous;
	asm ( "mov %%cr3, %0" : "=r"(previous) );
	asm volatile ( "mov %0, %%cr3" : : "r"(previous) );
}
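
// Reloading CR3 with its current value is the classic way to flush the TLB
// on x86: any write to CR3 discards all non-global TLB entries. Entries for
// pages marked global survive such a reload and would need invlpg or a
// CR4.PGE toggle instead, which is why InvalidatePage has a TODO to use the
// dedicated instruction rather than this big hammer.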

bool MapRange(addr_t where, size_t bytes, int protection, enum page_usage usage)
{
	for ( addr_t page = where; page < where + bytes; page += 4096UL )
	{
		addr_t physicalpage = Page::Get(usage);
		if ( physicalpage == 0 )
		{
			while ( where < page )
			{
				page -= 4096UL;
				physicalpage = Unmap(page);
				Page::Put(physicalpage, usage);
			}
			return false;
		}

		Map(physicalpage, page, protection);
	}

	return true;
}

bool UnmapRange(addr_t where, size_t bytes, enum page_usage usage)
{
	for ( addr_t page = where; page < where + bytes; page += 4096UL )
	{
		addr_t physicalpage = Unmap(page);
		Page::Put(physicalpage, usage);
	}
	return true;
}

static bool MapInternal(addr_t physical, addr_t mapto, int prot, addr_t extraflags = 0)
{
	addr_t flags = ProtectionToPMLFlags(prot) | PML_PRESENT;

	// Translate the virtual address into PML indexes.
	const size_t MASK = (1<<TRANSBITS)-1;
	size_t pmlchildid[TOPPMLLEVEL + 1];
	for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
		pmlchildid[i] = mapto >> (12 + (i-1) * TRANSBITS) & MASK;

	// For each PML level, make sure it exists.
	size_t offset = 0;
	for ( size_t i = TOPPMLLEVEL; i > 1; i-- )
	{
		size_t childid = pmlchildid[i];
		PML* pml = PMLS[i] + offset;

		addr_t& entry = pml->entry[childid];

		// Find the index of the next PML in the fractal mapped memory.
		size_t childoffset = offset * ENTRIES + childid;

		if ( !(entry & PML_PRESENT) )
		{
			// TODO: Possible memory leak when page allocation fails.
			addr_t page = Page::Get(PAGE_USAGE_PAGING_OVERHEAD);

			if ( !page )
				return false;
			addr_t pmlflags = PML_PRESENT | PML_WRITABLE | PML_USERSPACE
			                | PML_FORK;
			entry = page | pmlflags;

			// Invalidate the new PML and reset it to zeroes.
			addr_t pmladdr = (addr_t) (PMLS[i-1] + childoffset);
			InvalidatePage(pmladdr);
			memset((void*) pmladdr, 0, sizeof(PML));
		}

		offset = childoffset;
	}

	// Actually map the physical page to the virtual page.
	const addr_t entry = physical | flags | extraflags;
	(PMLS[1] + offset)->entry[pmlchildid[1]] = entry;
	return true;
}

bool Map(addr_t physical, addr_t mapto, int prot)
{
	return MapInternal(physical, mapto, prot);
}

void PageProtect(addr_t mapto, int protection)
{
	addr_t phys;
	if ( !LookUp(mapto, &phys, NULL) )
		return;
	Map(phys, mapto, protection);
}

void PageProtectAdd(addr_t mapto, int protection)
{
	addr_t phys;
	int prot;
	if ( !LookUp(mapto, &phys, &prot) )
		return;
	prot |= protection;
	Map(phys, mapto, prot);
}

void PageProtectSub(addr_t mapto, int protection)
{
	addr_t phys;
	int prot;
	if ( !LookUp(mapto, &phys, &prot) )
		return;
	prot &= ~protection;
	Map(phys, mapto, prot);
}
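
// A hypothetical usage sketch of the protection helpers above: to briefly
// make a page writable for the kernel and then restore it, a caller could do
//
//	PageProtectAdd(mapto, PROT_KWRITE);
//	// ... patch the page contents ...
//	PageProtectSub(mapto, PROT_KWRITE);
//
// Both helpers silently do nothing if mapto isn't currently mapped, since
// LookUp fails and they return early.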

addr_t Unmap(addr_t mapto)
{
	// Translate the virtual address into PML indexes.
	const size_t MASK = (1<<TRANSBITS)-1;
	size_t pmlchildid[TOPPMLLEVEL + 1];
	for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
	{
		pmlchildid[i] = mapto >> (12 + (i-1) * TRANSBITS) & MASK;
	}

	// For each PML level, make sure it exists.
	size_t offset = 0;
	for ( size_t i = TOPPMLLEVEL; i > 1; i-- )
	{
		size_t childid = pmlchildid[i];
		PML* pml = PMLS[i] + offset;

		addr_t& entry = pml->entry[childid];

		if ( !(entry & PML_PRESENT) )
			PanicF("Attempted to unmap virtual page 0x%jX, but the virtual"
			       " page wasn't mapped. This is a bug in the code"
			       " calling this function", (uintmax_t) mapto);

		// Find the index of the next PML in the fractal mapped memory.
		offset = offset * ENTRIES + childid;
	}

	addr_t& entry = (PMLS[1] + offset)->entry[pmlchildid[1]];
	addr_t result = entry & PML_ADDRESS;
	entry = 0;

	// TODO: If all the entries in PML[N] are not-present, then who
	//       unmaps its entry from PML[N-1]?

	return result;
}

bool MapPAT(addr_t physical, addr_t mapto, int prot, addr_t mtype)
{
	addr_t extraflags = PAT2PMLFlags[mtype];
	return MapInternal(physical, mapto, prot, extraflags);
}

void ForkCleanup(size_t i, size_t level)
{
	PML* destpml = FORKPML + level;
	if ( !i )
		return;
	for ( size_t n = 0; n < i-1; n++ )
	{
		addr_t entry = destpml->entry[n];
		if ( !(entry & PML_FORK ) )
			continue;
		addr_t phys = entry & PML_ADDRESS;
		if ( 1 < level )
		{
			addr_t destaddr = (addr_t) (FORKPML + level-1);
			Map(phys, destaddr, PROT_KREAD | PROT_KWRITE);
			InvalidatePage(destaddr);
			ForkCleanup(ENTRIES+1UL, level-1);
		}
		enum page_usage usage = 1 < level ? PAGE_USAGE_PAGING_OVERHEAD
		                                  : PAGE_USAGE_USER_SPACE;
		Page::Put(phys, usage);
	}
}

// TODO: Copying every frame is endlessly useless in many uses. It'd be
//       nice to upgrade this to a copy-on-write algorithm.
bool Fork(size_t level, size_t pmloffset)
{
	PML* destpml = FORKPML + level;
	for ( size_t i = 0; i < ENTRIES; i++ )
	{
		addr_t entry = (PMLS[level] + pmloffset)->entry[i];

		// Link the entry if it isn't supposed to be forked.
		if ( !(entry & PML_PRESENT) || !(entry & PML_FORK ) )
		{
			destpml->entry[i] = entry;
			continue;
		}

		enum page_usage usage = 1 < level ? PAGE_USAGE_PAGING_OVERHEAD
		                                  : PAGE_USAGE_USER_SPACE;
		addr_t phys = Page::Get(usage);
		if ( unlikely(!phys) )
		{
			ForkCleanup(i, level);
			return false;
		}

		addr_t flags = entry & PML_FLAGS;
		destpml->entry[i] = phys | flags;

		// Map the destination page.
		addr_t destaddr = (addr_t) (FORKPML + level-1);
		Map(phys, destaddr, PROT_KREAD | PROT_KWRITE);
		InvalidatePage(destaddr);

		size_t offset = pmloffset * ENTRIES + i;

		if ( 1 < level )
		{
			if ( !Fork(level-1, offset) )
			{
				Page::Put(phys, usage);
				ForkCleanup(i, level);
				return false;
			}
			continue;
		}

		// Determine the source page's address.
		const void* src = (const void*) (offset * 4096UL);

		// Determine the destination page's address.
		void* dest = (void*) (FORKPML + level - 1);

		memcpy(dest, src, 4096UL);
	}

	return true;
}

bool Fork(addr_t dir, size_t level, size_t pmloffset)
{
	PML* destpml = FORKPML + level;

	// This call always succeeds.
	Map(dir, (addr_t) destpml, PROT_KREAD | PROT_KWRITE);
	InvalidatePage((addr_t) destpml);

	return Fork(level, pmloffset);
}

// Create an exact copy of the current address space.
addr_t Fork()
{
	addr_t dir = Page::Get(PAGE_USAGE_PAGING_OVERHEAD);
	if ( dir == 0 )
		return 0;
	if ( !Fork(dir, TOPPMLLEVEL, 0) )
	{
		Page::Put(dir, PAGE_USAGE_PAGING_OVERHEAD);
		return 0;
	}

	// Now, the new top pml needs to have its fractal memory fixed.
	const addr_t flags = PML_PRESENT | PML_WRITABLE;
	addr_t mapto;
	addr_t childaddr;

	(FORKPML + TOPPMLLEVEL)->entry[ENTRIES-1] = dir | flags;
	childaddr = (FORKPML + TOPPMLLEVEL)->entry[ENTRIES-2] & PML_ADDRESS;

	for ( size_t i = TOPPMLLEVEL-1; i > 0; i-- )
	{
		mapto = (addr_t) (FORKPML + i);
		Map(childaddr, mapto, PROT_KREAD | PROT_KWRITE);
		InvalidatePage(mapto);
		(FORKPML + i)->entry[ENTRIES-1] = dir | flags;
		childaddr = (FORKPML + i)->entry[ENTRIES-2] & PML_ADDRESS;
	}
	return dir;
}
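
// The "fractal memory" fixed up above is a recursive page table scheme: the
// last entry (ENTRIES-1) at each level is pointed back at the top level
// directory, which makes every page table in the hierarchy visible at a
// fixed virtual address (the PMLS[] windows) without per-table mappings.
// After a fork these self-referencing entries still name the parent's
// directory, so the loop rewrites them to the child's dir; otherwise the
// child would edit its parent's tables through the fractal window.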

} // namespace Memory
} // namespace Sortix