/*******************************************************************************

	Copyright(C) Jonas 'Sortie' Termansen 2011, 2012.

	This file is part of Sortix.

	Sortix is free software: you can redistribute it and/or modify it under the
	terms of the GNU General Public License as published by the Free Software
	Foundation, either version 3 of the License, or (at your option) any later
	version.

	Sortix is distributed in the hope that it will be useful, but WITHOUT ANY
	WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
	FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
	details.

	You should have received a copy of the GNU General Public License along with
	Sortix. If not, see <http://www.gnu.org/licenses/>.

	x86-family/memorymanagement.cpp
	Handles memory for the x86 family of architectures.

*******************************************************************************/

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <sortix/mman.h>

#include <sortix/kernel/kernel.h>
#include <sortix/kernel/kthread.h>
#include <sortix/kernel/memorymanagement.h>
#include <sortix/kernel/panic.h>
#include <sortix/kernel/syscall.h>

#include "multiboot.h"
#include "memorymanagement.h"
#include "msr.h"
namespace Sortix
{
	extern size_t end;

	namespace Page
	{
		void InitPushRegion(addr_t position, size_t length);
		size_t pagesnotonstack;
		size_t stackused;
		size_t stackreserved;
		size_t stacklength;
		size_t totalmem;
		kthread_mutex_t pagelock;
	}
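
	// Added commentary, not original code: free physical pages are kept on a
	// stack, STACK, that is itself built from physical pages mapped as needed.
	// stackused counts the entries currently on the stack, stackreserved
	// counts entries promised to callers through Reserve(), and
	// pagesnotonstack counts pages that could not be tracked because the
	// stack hit MAXSTACKLENGTH. STACK and MAXSTACKLENGTH are assumed to be
	// provided by memorymanagement.h.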

	namespace Memory
	{
		addr_t currentdir = 0;

		void InitCPU();
		void AllocateKernelPMLs();
		int SysMemStat(size_t* memused, size_t* memtotal);
		addr_t PAT2PMLFlags[PAT_NUM];

		void InitCPU(multiboot_info_t* bootinfo)
		{
			const size_t MAXKERNELEND = 0x400000UL; /* 4 MiB */
			addr_t kernelend = Page::AlignUp((addr_t) &end);
			if ( MAXKERNELEND < kernelend )
			{
				Log::PrintF("Warning: The kernel is too big! It ends at 0x%zx, "
				            "but the highest ending address supported is 0x%zx. "
				            "The system may not boot correctly.\n", kernelend,
				            MAXKERNELEND);
			}

			Page::stackreserved = 0;
			Page::pagesnotonstack = 0;
			Page::totalmem = 0;
			Page::pagelock = KTHREAD_MUTEX_INITIALIZER;

			if ( !( bootinfo->flags & MULTIBOOT_INFO_MEM_MAP ) )
			{
				Panic("memorymanagement.cpp: The memory map flag wasn't set in "
				      "the multiboot structure. Is your bootloader multiboot "
				      "specification compliant?");
			}

			// If supported, set up the Page Attribute Table feature that allows
			// us to control the memory type (caching) of memory more precisely.
			if ( MSR::IsPATSupported() )
			{
				MSR::InitializePAT();
				for ( addr_t i = 0; i < PAT_NUM; i++ )
					PAT2PMLFlags[i] = EncodePATAsPMLFlag(i);
			}
			// Otherwise, reroute all requests to the backwards compatible
			// scheme. TODO: Not all early 32-bit x86 CPUs support these
			// values, so we need yet another fallback.
			else
			{
				PAT2PMLFlags[PAT_UC] = PML_WRTHROUGH | PML_NOCACHE;
				PAT2PMLFlags[PAT_WC] = PML_WRTHROUGH | PML_NOCACHE; // Approx.
				PAT2PMLFlags[2] = 0; // No such flag.
				PAT2PMLFlags[3] = 0; // No such flag.
				PAT2PMLFlags[PAT_WT] = PML_WRTHROUGH;
				PAT2PMLFlags[PAT_WP] = PML_WRTHROUGH; // Approx.
				PAT2PMLFlags[PAT_WB] = 0;
				PAT2PMLFlags[PAT_UCM] = PML_NOCACHE;
			}
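
			// Added commentary, not original code: the PAT indexes above
			// follow the architectural encodings 0-7 and correspond to the
			// standard x86 memory types: UC (uncacheable), WC
			// (write-combining), WT (write-through), WP (write-protect),
			// WB (write-back, the default for ordinary RAM) and UC- (UCM).
			// Without PAT only the PWT and PCD page table bits exist, which
			// is why WC and WP degrade to approximations in the fallback.
			// The PAT_* and PML_* constants are assumed to come from msr.h
			// and memorymanagement.h.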

			// Initialize CPU-specific things.
			InitCPU();

			typedef const multiboot_memory_map_t* mmap_t;
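
			// Added commentary, not original code: per the Multiboot
			// specification, each memory map entry begins with a size field
			// that does not count itself, followed by a 64-bit base address,
			// a 64-bit length, and a 32-bit type (1 meaning available RAM).
			// That is why the loop below advances by
			// mmap->size + sizeof(mmap->size) rather than sizeof(*mmap).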

			// Loop over every detected memory region.
			for (
			     mmap_t mmap = (mmap_t) (addr_t) bootinfo->mmap_addr;
			     (addr_t) mmap < bootinfo->mmap_addr + bootinfo->mmap_length;
			     mmap = (mmap_t) ((addr_t) mmap + mmap->size + sizeof(mmap->size))
			    )
			{
				// Check that we can use this kind of RAM.
				if ( mmap->type != 1 ) { continue; }

				// The kernel's code may split this memory area into multiple pieces.
				addr_t base = (addr_t) mmap->addr;
				size_t length = Page::AlignDown(mmap->len);

#if defined(__i386__)
				// Figure out if the memory area is addressable
				// (are our pointers big enough?).
				if ( 0xFFFFFFFFULL < mmap->addr ) { continue; }
				if ( 0xFFFFFFFFULL < mmap->addr + mmap->len ) { length = 0x100000000ULL - mmap->addr; }
#endif

				// Count the amount of usable RAM (even if reserved for kernel).
				Page::totalmem += length;

				// Give all the physical memory to the physical memory allocator
				// but make sure not to give it things we already use.
				addr_t regionstart = base;
				addr_t regionend = base + length;
				addr_t processed = regionstart;
				while ( processed < regionend )
				{
					addr_t lowest = processed;
					addr_t highest = regionend;

					// Don't allocate the kernel.
					if ( lowest < kernelend ) { processed = kernelend; continue; }

					// Don't give any of our modules to the physical page
					// allocator, we'll need them.
					bool continuing = false;
					uint32_t* modules = (uint32_t*) (addr_t) bootinfo->mods_addr;
					for ( uint32_t i = 0; i < bootinfo->mods_count; i++ )
					{
						size_t modsize = (size_t) (modules[2*i+1] - modules[2*i+0]);
						addr_t modstart = (addr_t) modules[2*i+0];
						addr_t modend = modstart + modsize;
						if ( modstart <= processed && processed < modend )
						{
							processed = modend;
							continuing = true;
							break;
						}
						if ( lowest <= modstart && modstart < highest )
						{
							highest = modstart;
						}
					}

					if ( continuing ) { continue; }

					if ( highest <= lowest ) { break; }

					// Now that we have a contiguous area not used by anything,
					// let's forward it to the physical page allocator.
					lowest = Page::AlignUp(lowest);
					highest = Page::AlignUp(highest);
					size_t size = highest - lowest;
					Page::InitPushRegion(lowest, size);
					processed = highest;
				}
			}

			// If the physical allocator couldn't handle the vast amount of
			// physical pages, it may decide to drop some. This shouldn't happen
			// until the pebibyte era of RAM.
			if ( 0 < Page::pagesnotonstack )
			{
				Log::PrintF("%zu bytes of RAM aren't used due to technical "
				            "restrictions.\n", Page::pagesnotonstack * 0x1000UL);
			}

			Memory::Unmap(0x0); // Remove NULL.

			// Finish allocating the top level PMLs for the kernel's use.
			AllocateKernelPMLs();
		}

		void Statistics(size_t* amountused, size_t* totalmem)
		{
			size_t memfree = (Page::stackused - Page::stackreserved) << 12UL;
			size_t memused = Page::totalmem - memfree;
			if ( amountused ) { *amountused = memused; }
			if ( totalmem ) { *totalmem = Page::totalmem; }
		}
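
		// Added commentary, not original code: a caller interested in memory
		// pressure might use Statistics like this (sketch):
		//
		//     size_t used, total;
		//     Memory::Statistics(&used, &total);
		//     Log::PrintF("%zu of %zu bytes used\n", used, total);
		//
		// The shift by 12 above converts a page count to bytes (4096 = 2^12).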

		// Prepare the non-forkable kernel PMLs such that forking the kernel
		// address space will always keep the kernel mapped.
		void AllocateKernelPMLs()
		{
			const addr_t flags = PML_PRESENT | PML_WRITABLE;

			PML* const pml = PMLS[TOPPMLLEVEL];

			size_t start = ENTRIES / 2;
			size_t end = ENTRIES;

			for ( size_t i = start; i < end; i++ )
			{
				if ( pml->entry[i] & PML_PRESENT ) { continue; }

				addr_t page = Page::Get();
				if ( !page ) { Panic("out of memory allocating boot PMLs"); }

				pml->entry[i] = page | flags;

				// Invalidate the new PML and reset it to zeroes.
				addr_t pmladdr = (addr_t) (PMLS[TOPPMLLEVEL-1] + i);
				InvalidatePage(pmladdr);
				memset((void*) pmladdr, 0, sizeof(PML));
			}
		}
	}
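
	// Added commentary, not original code: AllocateKernelPMLs above fills in
	// the upper half of the top-level PML (entries ENTRIES/2 and up), which
	// covers kernel space. Because every forked address space links to these
	// same second-level PMLs rather than copying them, a kernel mapping made
	// later is instantly visible in every address space. On x86_64, assuming
	// ENTRIES is 512, this is entries 256-511 of the PML4.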

	namespace Page
	{
		void ExtendStack()
		{
			// This call will always succeed: if it couldn't, the stack
			// wouldn't be full, and this function wouldn't have been called.
			addr_t page = GetUnlocked();

			// This call will also succeed, since there are plenty of physical
			// pages available and it might need some.
			addr_t virt = (addr_t) (STACK + stacklength);
			if ( !Memory::Map(page, virt, PROT_KREAD | PROT_KWRITE) )
			{
				Panic("Unable to extend page stack, which should have worked");
			}

			// TODO: This may not be needed during the boot process!
			//Memory::InvalidatePage((addr_t) (STACK + stacklength));

			stacklength += 4096UL / sizeof(addr_t);
		}
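
		// Added commentary, not original code: each call to ExtendStack grows
		// the stack's capacity by one page worth of entries, that is,
		// 4096 / sizeof(addr_t) = 1024 entries on i386 or 512 on x86_64, so
		// one stack page tracks 4 MiB (respectively 2 MiB) of physical memory.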

		void InitPushRegion(addr_t position, size_t length)
		{
			// Align our entries on page boundaries.
			addr_t newposition = Page::AlignUp(position);
			length = Page::AlignDown((position + length) - newposition);
			position = newposition;

			while ( length )
			{
				if ( unlikely(stackused == stacklength) )
				{
					if ( stackused == MAXSTACKLENGTH )
					{
						pagesnotonstack += length / 4096UL;
						return;
					}

					ExtendStack();
				}

				addr_t* stackentry = &(STACK[stackused++]);
				*stackentry = position;

				length -= 4096UL;
				position += 4096UL;
			}
		}

		bool ReserveUnlocked(size_t* counter, size_t least, size_t ideal)
		{
			assert(least <= ideal);
			size_t available = stackused - stackreserved;
			if ( available < least ) { errno = ENOMEM; return false; }
			if ( available < ideal ) { ideal = available; }
			stackreserved += ideal;
			*counter += ideal;
			return true;
		}

		bool Reserve(size_t* counter, size_t least, size_t ideal)
		{
			ScopedLock lock(&pagelock);
			return ReserveUnlocked(counter, least, ideal);
		}

		bool ReserveUnlocked(size_t* counter, size_t amount)
		{
			return ReserveUnlocked(counter, amount, amount);
		}

		bool Reserve(size_t* counter, size_t amount)
		{
			ScopedLock lock(&pagelock);
			return ReserveUnlocked(counter, amount);
		}

		addr_t GetReservedUnlocked(size_t* counter)
		{
			if ( !*counter ) { return 0; }
			assert(stackused); // After all, we did _reserve_ the memory.
			addr_t result = STACK[--stackused];
			assert(result == AlignDown(result));
			stackreserved--;
			(*counter)--;
			return result;
		}

		addr_t GetReserved(size_t* counter)
		{
			ScopedLock lock(&pagelock);
			return GetReservedUnlocked(counter);
		}
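
		// Added commentary, not original code: a sketch of the reservation
		// protocol. A caller that must not fail halfway through a multi-page
		// operation reserves pages up front and then draws them down:
		//
		//     size_t reserved = 0;
		//     if ( !Page::Reserve(&reserved, 2) )
		//         return false; // Not enough free pages; nothing committed.
		//     addr_t first = Page::GetReserved(&reserved);
		//     addr_t second = Page::GetReserved(&reserved);
		//
		// The calling code here is hypothetical; the Page::* functions are
		// the ones defined above.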

		addr_t GetUnlocked()
		{
			assert(stackreserved <= stackused);
			if ( unlikely(stackreserved == stackused) )
			{
				errno = ENOMEM;
				return 0;
			}
			addr_t result = STACK[--stackused];
			assert(result == AlignDown(result));
			return result;
		}

		addr_t Get()
		{
			ScopedLock lock(&pagelock);
			return GetUnlocked();
		}

		void PutUnlocked(addr_t page)
		{
			assert(page == AlignDown(page));
			if ( unlikely(stackused == stacklength) )
			{
				if ( stackused == MAXSTACKLENGTH )
				{
					pagesnotonstack++;
					return;
				}
				ExtendStack();
			}
			STACK[stackused++] = page;
		}

		void Put(addr_t page)
		{
			ScopedLock lock(&pagelock);
			PutUnlocked(page);
		}

		void Lock()
		{
			kthread_mutex_lock(&pagelock);
		}

		void Unlock()
		{
			kthread_mutex_unlock(&pagelock);
		}
	}
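
	// Added commentary, not original code: Lock/Unlock expose the allocator
	// lock so a caller can hold it across several *Unlocked calls, e.g.
	// (sketch, hypothetical caller):
	//
	//     Page::Lock();
	//     addr_t a = Page::GetUnlocked();
	//     addr_t b = Page::GetUnlocked();
	//     Page::Unlock();
	//
	// paying for one lock acquisition instead of two.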

	namespace Memory
	{
		addr_t ProtectionToPMLFlags(int prot)
		{
			addr_t result = 0;
			if ( prot & PROT_EXEC ) { result |= PML_USERSPACE; }
			if ( prot & PROT_READ ) { result |= PML_USERSPACE; }
			if ( prot & PROT_WRITE ) { result |= PML_USERSPACE | PML_WRITABLE; }
			if ( prot & PROT_KEXEC ) { result |= 0; }
			if ( prot & PROT_KREAD ) { result |= 0; }
			if ( prot & PROT_KWRITE ) { result |= 0; }
			if ( prot & PROT_FORK ) { result |= PML_FORK; }
			return result;
		}

		int PMLFlagsToProtection(addr_t flags)
		{
			int prot = PROT_KREAD | PROT_KWRITE | PROT_KEXEC;
			bool user = flags & PML_USERSPACE;
			bool write = flags & PML_WRITABLE;
			if ( user ) { prot |= PROT_EXEC | PROT_READ; }
			if ( user && write ) { prot |= PROT_WRITE; }
			return prot;
		}

		int ProvidedProtection(int prot)
		{
			addr_t flags = ProtectionToPMLFlags(prot);
			return PMLFlagsToProtection(flags);
		}
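
		// Added commentary, not original code: the PML flag encoding is
		// coarser than the PROT_* bits, so protections are granted in groups.
		// For instance, requesting only PROT_READ still sets PML_USERSPACE,
		// and on x86 without the NX bit a user-readable page is also
		// executable; ProvidedProtection reports the rights actually granted:
		//
		//     int got = ProvidedProtection(PROT_READ);
		//     // got includes PROT_EXEC and the kernel PROT_K* rights too.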

		bool LookUp(addr_t mapto, addr_t* physical, int* protection)
		{
			// Translate the virtual address into PML indexes.
			const size_t MASK = (1<<TRANSBITS)-1;
			size_t pmlchildid[TOPPMLLEVEL + 1];
			for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
			{
				pmlchildid[i] = (mapto >> (12+(i-1)*TRANSBITS)) & MASK;
			}

			int prot = PROT_USER | PROT_KERNEL | PROT_FORK;

			// For each PML level, make sure it exists.
			size_t offset = 0;
			for ( size_t i = TOPPMLLEVEL; i > 1; i-- )
			{
				size_t childid = pmlchildid[i];
				PML* pml = PMLS[i] + offset;

				addr_t entry = pml->entry[childid];
				if ( !(entry & PML_PRESENT) ) { return false; }
				int entryflags = entry & PML_FLAGS;
				int entryprot = PMLFlagsToProtection(entryflags);
				prot &= entryprot;

				// Find the index of the next PML in the fractal mapped memory.
				offset = offset * ENTRIES + childid;
			}

			addr_t entry = (PMLS[1] + offset)->entry[pmlchildid[1]];
			if ( !(entry & PML_PRESENT) ) { return false; }

			int entryflags = entry & PML_FLAGS;
			int entryprot = PMLFlagsToProtection(entryflags);
			prot &= entryprot;
			addr_t phys = entry & PML_ADDRESS;

			if ( physical ) { *physical = phys; }
			if ( protection ) { *protection = prot; }

			return true;
		}
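
		// Added commentary, not original code: a worked example of the index
		// math, assuming the i386 values TRANSBITS = 10 and TOPPMLLEVEL = 2.
		// For mapto = 0xC0201000:
		//
		//     pmlchildid[2] = (0xC0201000 >> 22) & 0x3FF = 768 (PML2 index)
		//     pmlchildid[1] = (0xC0201000 >> 12) & 0x3FF = 513 (PML1 index)
		//
		// On x86_64 the same loop runs with TRANSBITS = 9 and
		// TOPPMLLEVEL = 4, yielding the four 9-bit indexes of long mode.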

		void InvalidatePage(addr_t /*addr*/)
		{
			// TODO: Actually just call the instruction.
			Flush();
		}

		// Flushes the Translation Lookaside Buffer (TLB).
		void Flush()
		{
			asm volatile("mov %0, %%cr3":: "r"(currentdir));
		}
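
		// Added commentary, not original code: the instruction the TODO in
		// InvalidatePage refers to is invlpg, which evicts a single TLB entry
		// instead of reloading CR3 and flushing everything. A sketch of what
		// it could do instead (after un-commenting the addr parameter):
		//
		//     asm volatile("invlpg (%0)" :: "r"(addr) : "memory");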

		addr_t GetAddressSpace()
		{
			return currentdir;
		}

		addr_t SwitchAddressSpace(addr_t addrspace)
		{
			// Have fun debugging this.
			if ( currentdir != Page::AlignDown(currentdir) )
			{
				PanicF("The variable containing the current address space "
				       "contains garbage all of a sudden: it isn't page-aligned. "
				       "It contains the value 0x%zx.", currentdir);
			}

			// Don't switch if we are already there.
			if ( addrspace == currentdir ) { return currentdir; }

			if ( addrspace & 0xFFFUL ) { PanicF("addrspace 0x%zx was not page-aligned!", addrspace); }

			addr_t previous = currentdir;

			// Switch and flush the TLB.
			asm volatile("mov %0, %%cr3":: "r"(addrspace));

			currentdir = addrspace;

			return previous;
		}
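
		// Added commentary, not original code: callers typically switch, do
		// work in the other address space, and restore the old one (sketch):
		//
		//     addr_t old = Memory::SwitchAddressSpace(newdir);
		//     /* ... touch memory mapped in newdir ... */
		//     Memory::SwitchAddressSpace(old);
		//
		// newdir is a hypothetical page directory, e.g. obtained from Fork().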

		bool MapRange(addr_t where, size_t bytes, int protection)
		{
			for ( addr_t page = where; page < where + bytes; page += 4096UL )
			{
				addr_t physicalpage = Page::Get();
				if ( physicalpage == 0 )
				{
					while ( where < page )
					{
						page -= 4096UL;
						physicalpage = Unmap(page);
						Page::Put(physicalpage);
					}
					return false;
				}

				Map(physicalpage, page, protection);
			}

			return true;
		}

		bool UnmapRange(addr_t where, size_t bytes)
		{
			for ( addr_t page = where; page < where + bytes; page += 4096UL )
			{
				addr_t physicalpage = Unmap(page);
				Page::Put(physicalpage);
			}
			return true;
		}

		static bool MapInternal(addr_t physical, addr_t mapto, int prot, addr_t extraflags = 0)
		{
			addr_t flags = ProtectionToPMLFlags(prot) | PML_PRESENT;

			// Translate the virtual address into PML indexes.
			const size_t MASK = (1<<TRANSBITS)-1;
			size_t pmlchildid[TOPPMLLEVEL + 1];
			for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
			{
				pmlchildid[i] = (mapto >> (12+(i-1)*TRANSBITS)) & MASK;
			}

			// For each PML level, make sure it exists.
			size_t offset = 0;
			for ( size_t i = TOPPMLLEVEL; i > 1; i-- )
			{
				size_t childid = pmlchildid[i];
				PML* pml = PMLS[i] + offset;

				addr_t& entry = pml->entry[childid];

				// Find the index of the next PML in the fractal mapped memory.
				size_t childoffset = offset * ENTRIES + childid;

				if ( !(entry & PML_PRESENT) )
				{
					// TODO: Possible memory leak when page allocation fails.
					addr_t page = Page::Get();

					if ( !page ) { return false; }
					addr_t pmlflags = PML_PRESENT | PML_WRITABLE | PML_USERSPACE
					                | PML_FORK;
					entry = page | pmlflags;

					// Invalidate the new PML and reset it to zeroes.
					addr_t pmladdr = (addr_t) (PMLS[i-1] + childoffset);
					InvalidatePage(pmladdr);
					memset((void*) pmladdr, 0, sizeof(PML));
				}

				offset = childoffset;
			}

			// Actually map the physical page to the virtual page.
			const addr_t entry = physical | flags | extraflags;
			(PMLS[1] + offset)->entry[pmlchildid[1]] = entry;
			return true;
		}

		bool Map(addr_t physical, addr_t mapto, int prot)
		{
			return MapInternal(physical, mapto, prot);
		}

		void PageProtect(addr_t mapto, int protection)
		{
			addr_t phys;
			if ( !LookUp(mapto, &phys, NULL) )
				return;
			Map(phys, mapto, protection);
		}

		void PageProtectAdd(addr_t mapto, int protection)
		{
			addr_t phys;
			int prot;
			if ( !LookUp(mapto, &phys, &prot) )
				return;
			prot |= protection;
			Map(phys, mapto, prot);
		}

		void PageProtectSub(addr_t mapto, int protection)
		{
			addr_t phys;
			int prot;
			if ( !LookUp(mapto, &phys, &prot) )
				return;
			prot &= ~protection;
			Map(phys, mapto, prot);
		}

		addr_t Unmap(addr_t mapto)
		{
			// Translate the virtual address into PML indexes.
			const size_t MASK = (1<<TRANSBITS)-1;
			size_t pmlchildid[TOPPMLLEVEL + 1];
			for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
			{
				pmlchildid[i] = (mapto >> (12+(i-1)*TRANSBITS)) & MASK;
			}

			// For each PML level, make sure it exists.
			size_t offset = 0;
			for ( size_t i = TOPPMLLEVEL; i > 1; i-- )
			{
				size_t childid = pmlchildid[i];
				PML* pml = PMLS[i] + offset;

				addr_t& entry = pml->entry[childid];

				if ( !(entry & PML_PRESENT) )
				{
					PanicF("Attempted to unmap virtual page %p, but the "
					       "virtual page wasn't mapped. This is a bug in "
					       "the code calling this function", mapto);
				}

				// Find the index of the next PML in the fractal mapped memory.
				offset = offset * ENTRIES + childid;
			}

			addr_t& entry = (PMLS[1] + offset)->entry[pmlchildid[1]];
			addr_t result = entry & PML_ADDRESS;
			entry = 0;

			// TODO: If all the entries in PML[N] are not-present, then who
			// unmaps its entry from PML[N-1]?

			return result;
		}

		bool MapPAT(addr_t physical, addr_t mapto, int prot, addr_t mtype)
		{
			addr_t extraflags = PAT2PMLFlags[mtype];
			return MapInternal(physical, mapto, prot, extraflags);
		}
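
		// Added commentary, not original code: MapPAT is meant for memory
		// that must not be cached normally, e.g. a memory-mapped device.
		// A sketch, with framebufferphys and vaddr as hypothetical values:
		//
		//     Memory::MapPAT(framebufferphys, vaddr,
		//                    PROT_KREAD | PROT_KWRITE, PAT_WC);
		//
		// PAT_WC requests write-combining, which degrades to uncacheable
		// on CPUs without PAT support (see the fallback table in InitCPU).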

		void ForkCleanup(size_t i, size_t level)
		{
			PML* destpml = FORKPML + level;
			if ( !i ) { return; }
			for ( size_t n = 0; n < i-1; n++ )
			{
				addr_t entry = destpml->entry[n];
				if ( !(entry & PML_FORK ) ) { continue; }
				addr_t phys = entry & PML_ADDRESS;
				if ( 1 < level )
				{
					addr_t destaddr = (addr_t) (FORKPML + level-1);
					Map(phys, destaddr, PROT_KREAD | PROT_KWRITE);
					InvalidatePage(destaddr);
					ForkCleanup(ENTRIES+1UL, level-1);
				}
				Page::Put(phys);
			}
		}

		// TODO: Copying every frame is endlessly useless in many uses. It'd be
		// nice to upgrade this to a copy-on-write algorithm.
		bool Fork(size_t level, size_t pmloffset)
		{
			PML* destpml = FORKPML + level;
			for ( size_t i = 0; i < ENTRIES; i++ )
			{
				addr_t entry = (PMLS[level] + pmloffset)->entry[i];

				// Link the entry if it isn't supposed to be forked.
				if ( !(entry & PML_FORK ) )
				{
					destpml->entry[i] = entry;
					continue;
				}

				addr_t phys = Page::Get();
				if ( unlikely(!phys) ) { ForkCleanup(i, level); return false; }

				addr_t flags = entry & PML_FLAGS;
				destpml->entry[i] = phys | flags;

				// Map the destination page.
				addr_t destaddr = (addr_t) (FORKPML + level-1);
				Map(phys, destaddr, PROT_KREAD | PROT_KWRITE);
				InvalidatePage(destaddr);

				size_t offset = pmloffset * ENTRIES + i;

				if ( 1 < level )
				{
					if ( !Fork(level-1, offset) )
					{
						Page::Put(phys);
						ForkCleanup(i, level);
						return false;
					}
					continue;
				}

				// Determine the source page's address.
				const void* src = (const void*) (offset * 4096UL);

				// Determine the destination page's address.
				void* dest = (void*) (FORKPML + level - 1);

				memcpy(dest, src, 4096UL);
			}

			return true;
		}

		bool Fork(addr_t dir, size_t level, size_t pmloffset)
		{
			PML* destpml = FORKPML + level;

			// This call always succeeds.
			Map(dir, (addr_t) destpml, PROT_KREAD | PROT_KWRITE);
			InvalidatePage((addr_t) destpml);

			return Fork(level, pmloffset);
		}
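
		// Added commentary, not original code: the code above and below
		// relies on the fractal (recursive) mapping convention: the last
		// entry (ENTRIES-1) of the top-level PML points back at that PML
		// itself, which is what makes the PMLS[level] and FORKPML windows
		// addressable. Fork() below therefore patches entry ENTRIES-1 of
		// every new PML to point at the new directory, giving the child
		// address space its own self-map; the ENTRIES-2 slot appears to be
		// the temporary fork window used during the copy.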

		// Create an exact copy of the current address space.
		addr_t Fork()
		{
			addr_t dir = Page::Get();
			if ( dir == 0 ) { return 0; }
			if ( !Fork(dir, TOPPMLLEVEL, 0) ) { Page::Put(dir); return 0; }

			// Now, the new top pml needs to have its fractal memory fixed.
			const addr_t flags = PML_PRESENT | PML_WRITABLE;
			addr_t mapto;
			addr_t childaddr;

			(FORKPML + TOPPMLLEVEL)->entry[ENTRIES-1] = dir | flags;
			childaddr = (FORKPML + TOPPMLLEVEL)->entry[ENTRIES-2] & PML_ADDRESS;

			for ( size_t i = TOPPMLLEVEL-1; i > 0; i-- )
			{
				mapto = (addr_t) (FORKPML + i);
				Map(childaddr, mapto, PROT_KREAD | PROT_KWRITE);
				InvalidatePage(mapto);

				(FORKPML + i)->entry[ENTRIES-1] = dir | flags;
				childaddr = (FORKPML + i)->entry[ENTRIES-2] & PML_ADDRESS;
			}
			return dir;
		}
	}
}