/*******************************************************************************

    Copyright(C) Jonas 'Sortie' Termansen 2011, 2012, 2014.

    This file is part of Sortix.

    Sortix is free software: you can redistribute it and/or modify it under the
    terms of the GNU General Public License as published by the Free Software
    Foundation, either version 3 of the License, or (at your option) any later
    version.

    Sortix is distributed in the hope that it will be useful, but WITHOUT ANY
    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
    FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
    details.

    You should have received a copy of the GNU General Public License along with
    Sortix. If not, see <http://www.gnu.org/licenses/>.

    x86-family/memorymanagement.cpp
    Handles memory for the x86 family of architectures.

*******************************************************************************/

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <sortix/mman.h>

#include <sortix/kernel/kernel.h>
#include <sortix/kernel/kthread.h>
#include <sortix/kernel/memorymanagement.h>
#include <sortix/kernel/panic.h>
#include <sortix/kernel/pat.h>
#include <sortix/kernel/syscall.h>

#include "multiboot.h"
#include "memorymanagement.h"
#include "msr.h"

namespace Sortix {

extern size_t end;

} // namespace Sortix

namespace Sortix {
namespace Page {

void InitPushRegion(addr_t position, size_t length);
size_t pagesnotonstack;
size_t stackused;
size_t stackreserved;
size_t stacklength;
size_t totalmem;
kthread_mutex_t pagelock;

} // namespace Page
} // namespace Sortix

namespace Sortix {
namespace Memory {

addr_t currentdir = 0;

void InitCPU();
void AllocateKernelPMLs();
int SysMemStat(size_t* memused, size_t* memtotal);
addr_t PAT2PMLFlags[PAT_NUM];
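
// InitCPU() below discovers RAM via the multiboot memory map and feeds
// every usable region to the physical page allocator, which keeps free
// frames on a simple stack of page-aligned addresses: Page::Get() pops
// a frame and Page::Put() pushes one back, both O(1) under pagelock.
// (STACK and MAXSTACKLENGTH are assumed to be declared in the included
// memorymanagement.h.)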
void InitCPU(multiboot_info_t* bootinfo)
{
    const size_t MAXKERNELEND = 0x400000UL; /* 4 MiB */
    addr_t kernelend = Page::AlignUp((addr_t) &end);
    if ( MAXKERNELEND < kernelend )
    {
        Log::PrintF("Warning: The kernel is too big! It ends at 0x%zx, "
                    "but the highest ending address supported is 0x%zx. "
                    "The system may not boot correctly.\n", kernelend,
                    MAXKERNELEND);
    }

    Page::stackreserved = 0;
    Page::pagesnotonstack = 0;
    Page::totalmem = 0;
    Page::pagelock = KTHREAD_MUTEX_INITIALIZER;

    if ( !( bootinfo->flags & MULTIBOOT_INFO_MEM_MAP ) )
        Panic("memorymanagement.cpp: The memory map flag wasn't set in "
              "the multiboot structure. Is your bootloader multiboot "
              "specification compliant?");

    // If supported, set up the Page Attribute Table feature that allows
    // us to control the memory type (caching) of memory more precisely.
    if ( IsPATSupported() )
    {
        InitializePAT();
        for ( addr_t i = 0; i < PAT_NUM; i++ )
            PAT2PMLFlags[i] = EncodePATAsPMLFlag(i);
    }
    // Otherwise, reroute all requests to the backwards compatible
    // scheme. TODO: Not all early 32-bit x86 CPUs support these
    // values, so we need yet another fallback.
    else
    {
        PAT2PMLFlags[PAT_UC] = PML_WRTHROUGH | PML_NOCACHE;
        PAT2PMLFlags[PAT_WC] = PML_WRTHROUGH | PML_NOCACHE; // Approx.
        PAT2PMLFlags[2] = 0; // No such flag.
        PAT2PMLFlags[3] = 0; // No such flag.
        PAT2PMLFlags[PAT_WT] = PML_WRTHROUGH;
        PAT2PMLFlags[PAT_WP] = PML_WRTHROUGH; // Approx.
        PAT2PMLFlags[PAT_WB] = 0;
        PAT2PMLFlags[PAT_UCM] = PML_NOCACHE;
    }

    // Initialize CPU-specific things.
    InitCPU();

    typedef const multiboot_memory_map_t* mmap_t;

    // Loop over every detected memory region.
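    // Multiboot memory map entries are variable-sized: each entry's size
    // field counts the bytes that follow it, which is why the increment
    // below is mmap->size + sizeof(mmap->size). Type 1 marks RAM that is
    // available for use.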
    for (
        mmap_t mmap = (mmap_t) (addr_t) bootinfo->mmap_addr;
        (addr_t) mmap < bootinfo->mmap_addr + bootinfo->mmap_length;
        mmap = (mmap_t) ((addr_t) mmap + mmap->size + sizeof(mmap->size))
    )
    {
        // Check that we can use this kind of RAM.
        if ( mmap->type != 1 )
            continue;

        // The kernel's code may split this memory area into multiple pieces.
        addr_t base = (addr_t) mmap->addr;
        size_t length = Page::AlignDown(mmap->len);

#if defined(__i386__)
        // Figure out if the memory area is addressable (are our pointers big enough?)
        if ( 0xFFFFFFFFULL < mmap->addr )
            continue;
        if ( 0xFFFFFFFFULL < mmap->addr + mmap->len )
            length = 0x100000000ULL - mmap->addr;
#endif

        // Count the amount of usable RAM (even if reserved for kernel).
        Page::totalmem += length;

        // Give all the physical memory to the physical memory allocator,
        // but make sure not to give it things we already use.
        addr_t regionstart = base;
        addr_t regionend = base + length;
        addr_t processed = regionstart;
        while ( processed < regionend )
        {
            addr_t lowest = processed;
            addr_t highest = regionend;

            // Don't allocate the kernel.
            if ( lowest < kernelend )
            {
                processed = kernelend;
                continue;
            }
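
            // The module list is read below as packed pairs of 32-bit
            // (start, end) physical addresses; this assumes the boot
            // code laid out bootinfo->mods_addr that way.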

            // Don't give any of our modules to the physical page
            // allocator; we'll need them.
            bool continuing = false;
            uint32_t* modules = (uint32_t*) (addr_t) bootinfo->mods_addr;
            for ( uint32_t i = 0; i < bootinfo->mods_count; i++ )
            {
                size_t modsize = (size_t) (modules[2*i+1] - modules[2*i+0]);
                addr_t modstart = (addr_t) modules[2*i+0];
                addr_t modend = modstart + modsize;
                if ( modstart <= processed && processed < modend )
                {
                    processed = modend;
                    continuing = true;
                    break;
                }
                if ( lowest <= modstart && modstart < highest )
                    highest = modstart;
            }

            if ( continuing )
                continue;

            if ( highest <= lowest )
                break;

            // Now that we have a continuous area not used by anything,
            // let's forward it to the physical page allocator.
            lowest = Page::AlignUp(lowest);
            highest = Page::AlignUp(highest);
            size_t size = highest - lowest;
            Page::InitPushRegion(lowest, size);
            processed = highest;
        }
    }

    // If the physical allocator couldn't handle the vast amount of
    // physical pages, it may decide to drop some. This shouldn't happen
    // until the pebibyte era of RAM.
    if ( 0 < Page::pagesnotonstack )
        Log::PrintF("%zu bytes of RAM aren't used due to technical "
                    "restrictions.\n", (size_t) (Page::pagesnotonstack * 0x1000UL));

    Memory::Unmap(0x0); // Remove NULL.

    // Finish allocating the top level PMLs for the kernel's use.
    AllocateKernelPMLs();
}

void Statistics(size_t* amountused, size_t* totalmem)
{
    size_t memfree = (Page::stackused - Page::stackreserved) << 12UL;
    size_t memused = Page::totalmem - memfree;
    if ( amountused )
        *amountused = memused;
    if ( totalmem )
        *totalmem = Page::totalmem;
}

// Prepare the non-forkable kernel PMLs such that forking the kernel
// address space will always keep the kernel mapped.
void AllocateKernelPMLs()
{
    const addr_t flags = PML_PRESENT | PML_WRITABLE;

    PML* const pml = PMLS[TOPPMLLEVEL];

    size_t start = ENTRIES / 2;
    size_t end = ENTRIES;

    for ( size_t i = start; i < end; i++ )
    {
        if ( pml->entry[i] & PML_PRESENT )
            continue;

        addr_t page = Page::Get();
        if ( !page )
            Panic("out of memory allocating boot PMLs");

        pml->entry[i] = page | flags;

        // Invalidate the new PML and reset it to zeroes.
        addr_t pmladdr = (addr_t) (PMLS[TOPPMLLEVEL-1] + i);
        InvalidatePage(pmladdr);
        memset((void*) pmladdr, 0, sizeof(PML));
    }
}

} // namespace Memory
} // namespace Sortix

namespace Sortix {
namespace Page {
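
// The page stack is self-hosting: when it runs out of capacity,
// ExtendStack() pops one free page off the stack itself and maps it at
// the end of the stack's virtual region, gaining room for another
// 4096 / sizeof(addr_t) entries.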

void ExtendStack()
{
    // This call will always succeed: if it couldn't, the stack wouldn't
    // be full, and this function wouldn't have been called.
    addr_t page = GetUnlocked();

    // This call will also succeed, since there are plenty of physical
    // pages available and it might need some.
    addr_t virt = (addr_t) (STACK + stacklength);
    if ( !Memory::Map(page, virt, PROT_KREAD | PROT_KWRITE) )
        Panic("Unable to extend page stack, which should have worked");

    // TODO: This may not be needed during the boot process!
    //Memory::InvalidatePage((addr_t) (STACK + stacklength));

    stacklength += 4096UL / sizeof(addr_t);
}

void InitPushRegion(addr_t position, size_t length)
{
    // Align our entries on page boundaries.
    addr_t newposition = Page::AlignUp(position);
    length = Page::AlignDown((position + length) - newposition);
    position = newposition;

    while ( length )
    {
        if ( unlikely(stackused == stacklength) )
        {
            if ( stackused == MAXSTACKLENGTH )
            {
                pagesnotonstack += length / 4096UL;
                return;
            }

            ExtendStack();
        }

        addr_t* stackentry = &(STACK[stackused++]);
        *stackentry = position;

        length -= 4096UL;
        position += 4096UL;
    }
}
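
// The reservation API below lets a caller set aside free pages in
// advance: Reserve() claims between least and ideal pages under the
// lock and credits them to *counter, after which GetReserved() hands
// out one reserved page at a time and cannot fail. A multi-page
// operation can thus learn up front whether enough memory exists.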

bool ReserveUnlocked(size_t* counter, size_t least, size_t ideal)
{
    assert(least <= ideal);
    size_t available = stackused - stackreserved;
    if ( available < least )
        return errno = ENOMEM, false;
    if ( available < ideal )
        ideal = available;
    stackreserved += ideal;
    *counter += ideal;
    return true;
}

bool Reserve(size_t* counter, size_t least, size_t ideal)
{
    ScopedLock lock(&pagelock);
    return ReserveUnlocked(counter, least, ideal);
}

bool ReserveUnlocked(size_t* counter, size_t amount)
{
    return ReserveUnlocked(counter, amount, amount);
}

bool Reserve(size_t* counter, size_t amount)
{
    ScopedLock lock(&pagelock);
    return ReserveUnlocked(counter, amount);
}

addr_t GetReservedUnlocked(size_t* counter)
{
    if ( !*counter )
        return 0;
    assert(stackused); // After all, we did _reserve_ the memory.
    addr_t result = STACK[--stackused];
    assert(result == AlignDown(result));
    stackreserved--;
    (*counter)--;
    return result;
}

addr_t GetReserved(size_t* counter)
{
    ScopedLock lock(&pagelock);
    return GetReservedUnlocked(counter);
}

addr_t GetUnlocked()
{
    assert(stackreserved <= stackused);
    if ( unlikely(stackreserved == stackused) )
        return errno = ENOMEM, 0;
    addr_t result = STACK[--stackused];
    assert(result == AlignDown(result));
    return result;
}

addr_t Get()
{
    ScopedLock lock(&pagelock);
    return GetUnlocked();
}

void PutUnlocked(addr_t page)
{
    assert(page == AlignDown(page));
    if ( unlikely(stackused == stacklength) )
    {
        if ( stackused == MAXSTACKLENGTH )
        {
            pagesnotonstack++;
            return;
        }
        ExtendStack();
    }
    STACK[stackused++] = page;
}

void Put(addr_t page)
{
    ScopedLock lock(&pagelock);
    PutUnlocked(page);
}

void Lock()
{
    kthread_mutex_lock(&pagelock);
}

void Unlock()
{
    kthread_mutex_unlock(&pagelock);
}

} // namespace Page
} // namespace Sortix

namespace Sortix {
namespace Memory {
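
// In the translation below the kernel protections deliberately add no
// hardware bits (result |= 0): only user-space visibility
// (PML_USERSPACE) and writability (PML_WRITABLE) are encoded in the
// entry, and PMLFlagsToProtection() correspondingly reports every
// present mapping as kernel-accessible. PML_FORK is presumably one of
// the software-available entry bits, used to tag pages Fork() must
// copy rather than share.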

addr_t ProtectionToPMLFlags(int prot)
{
    addr_t result = 0;
    if ( prot & PROT_EXEC ) { result |= PML_USERSPACE; }
    if ( prot & PROT_READ ) { result |= PML_USERSPACE; }
    if ( prot & PROT_WRITE ) { result |= PML_USERSPACE | PML_WRITABLE; }
    if ( prot & PROT_KEXEC ) { result |= 0; }
    if ( prot & PROT_KREAD ) { result |= 0; }
    if ( prot & PROT_KWRITE ) { result |= 0; }
    if ( prot & PROT_FORK ) { result |= PML_FORK; }
    return result;
}

int PMLFlagsToProtection(addr_t flags)
{
    int prot = PROT_KREAD | PROT_KWRITE | PROT_KEXEC;
    bool user = flags & PML_USERSPACE;
    bool write = flags & PML_WRITABLE;
    if ( user )
        prot |= PROT_EXEC | PROT_READ;
    if ( user && write )
        prot |= PROT_WRITE;
    return prot;
}

int ProvidedProtection(int prot)
{
    addr_t flags = ProtectionToPMLFlags(prot);
    return PMLFlagsToProtection(flags);
}
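
// How the PML index translation below works: bits [12, 12+TRANSBITS)
// of a virtual address index the level-1 table, the next TRANSBITS bits
// index level 2, and so on up to TOPPMLLEVEL. With the conventional
// x86_64 values (TRANSBITS == 9, TOPPMLLEVEL == 4, 512 entries per
// table), pmlchildid[1] = (mapto >> 12) & 511 and
// pmlchildid[4] = (mapto >> 39) & 511.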

bool LookUp(addr_t mapto, addr_t* physical, int* protection)
{
    // Translate the virtual address into PML indexes.
    const size_t MASK = (1<<TRANSBITS)-1;
    size_t pmlchildid[TOPPMLLEVEL + 1];
    for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
        pmlchildid[i] = mapto >> (12 + (i-1) * TRANSBITS) & MASK;

    int prot = PROT_USER | PROT_KERNEL | PROT_FORK;

    // For each PML level, make sure it exists.
    size_t offset = 0;
    for ( size_t i = TOPPMLLEVEL; i > 1; i-- )
    {
        size_t childid = pmlchildid[i];
        PML* pml = PMLS[i] + offset;

        addr_t entry = pml->entry[childid];
        if ( !(entry & PML_PRESENT) )
            return false;
        addr_t entryflags = entry & PML_FLAGS;
        int entryprot = PMLFlagsToProtection(entryflags);
        prot &= entryprot;

        // Find the index of the next PML in the fractal mapped memory.
        offset = offset * ENTRIES + childid;
    }

    addr_t entry = (PMLS[1] + offset)->entry[pmlchildid[1]];
    if ( !(entry & PML_PRESENT) )
        return false;

    addr_t entryflags = entry & PML_FLAGS;
    int entryprot = PMLFlagsToProtection(entryflags);
    prot &= entryprot;
    addr_t phys = entry & PML_ADDRESS;

    if ( physical )
        *physical = phys;
    if ( protection )
        *protection = prot;

    return true;
}

void InvalidatePage(addr_t /*addr*/)
{
    // TODO: Actually just call the instruction.
    Flush();
}

// Flushes the Translation Lookaside Buffer (TLB).
void Flush()
{
    asm volatile("mov %0, %%cr3":: "r"(currentdir));
}
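
// Note that reloading CR3, as Flush() does, discards all non-global TLB
// entries, while the invlpg instruction that the TODO in
// InvalidatePage() refers to would invalidate just the entry for its
// operand.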

addr_t GetAddressSpace()
{
    return currentdir;
}

addr_t SwitchAddressSpace(addr_t addrspace)
{
    // Have fun debugging this.
    if ( currentdir != Page::AlignDown(currentdir) )
        PanicF("The variable containing the current address space "
               "contains garbage all of a sudden: it isn't page-aligned. "
               "It contains the value 0x%zx.", currentdir);

    // Don't switch if we are already there.
    if ( addrspace == currentdir )
        return currentdir;

    if ( addrspace & 0xFFFUL )
        PanicF("addrspace 0x%zx was not page-aligned!", addrspace);

    addr_t previous = currentdir;

    // Switch and flush the TLB.
    asm volatile("mov %0, %%cr3":: "r"(addrspace));

    currentdir = addrspace;

    return previous;
}
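
// MapRange() is transactional in spirit: if a physical page cannot be
// allocated partway through, everything mapped so far is unmapped and
// freed again, so the range is either fully mapped or left untouched.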

bool MapRange(addr_t where, size_t bytes, int protection)
{
    for ( addr_t page = where; page < where + bytes; page += 4096UL )
    {
        addr_t physicalpage = Page::Get();
        if ( physicalpage == 0 )
        {
            while ( where < page )
            {
                page -= 4096UL;
                physicalpage = Unmap(page);
                Page::Put(physicalpage);
            }
            return false;
        }

        Map(physicalpage, page, protection);
    }

    return true;
}

bool UnmapRange(addr_t where, size_t bytes)
{
    for ( addr_t page = where; page < where + bytes; page += 4096UL )
    {
        addr_t physicalpage = Unmap(page);
        Page::Put(physicalpage);
    }
    return true;
}
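
// The PMLS[] tables below are reached through fractal (recursive)
// mapping: the top-level PML's last entry points at the PML itself, so
// every page table of the current address space appears somewhere in a
// fixed virtual window. That is why walking down a level only takes the
// arithmetic offset = offset * ENTRIES + childid instead of reading a
// physical address out of the entry.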

static bool MapInternal(addr_t physical, addr_t mapto, int prot, addr_t extraflags = 0)
{
    addr_t flags = ProtectionToPMLFlags(prot) | PML_PRESENT;

    // Translate the virtual address into PML indexes.
    const size_t MASK = (1<<TRANSBITS)-1;
    size_t pmlchildid[TOPPMLLEVEL + 1];
    for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
        pmlchildid[i] = mapto >> (12 + (i-1) * TRANSBITS) & MASK;

    // For each PML level, make sure it exists.
    size_t offset = 0;
    for ( size_t i = TOPPMLLEVEL; i > 1; i-- )
    {
        size_t childid = pmlchildid[i];
        PML* pml = PMLS[i] + offset;

        addr_t& entry = pml->entry[childid];

        // Find the index of the next PML in the fractal mapped memory.
        size_t childoffset = offset * ENTRIES + childid;

        if ( !(entry & PML_PRESENT) )
        {
            // TODO: Possible memory leak when page allocation fails.
            addr_t page = Page::Get();

            if ( !page )
                return false;
            addr_t pmlflags = PML_PRESENT | PML_WRITABLE | PML_USERSPACE
                            | PML_FORK;
            entry = page | pmlflags;

            // Invalidate the new PML and reset it to zeroes.
            addr_t pmladdr = (addr_t) (PMLS[i-1] + childoffset);
            InvalidatePage(pmladdr);
            memset((void*) pmladdr, 0, sizeof(PML));
        }

        offset = childoffset;
    }

    // Actually map the physical page to the virtual page.
    const addr_t entry = physical | flags | extraflags;
    (PMLS[1] + offset)->entry[pmlchildid[1]] = entry;
    return true;
}

bool Map(addr_t physical, addr_t mapto, int prot)
{
    return MapInternal(physical, mapto, prot);
}

void PageProtect(addr_t mapto, int protection)
{
    addr_t phys;
    if ( !LookUp(mapto, &phys, NULL) )
        return;
    Map(phys, mapto, protection);
}

void PageProtectAdd(addr_t mapto, int protection)
{
    addr_t phys;
    int prot;
    if ( !LookUp(mapto, &phys, &prot) )
        return;
    prot |= protection;
    Map(phys, mapto, prot);
}

void PageProtectSub(addr_t mapto, int protection)
{
    addr_t phys;
    int prot;
    if ( !LookUp(mapto, &phys, &prot) )
        return;
    prot &= ~protection;
    Map(phys, mapto, prot);
}

addr_t Unmap(addr_t mapto)
{
    // Translate the virtual address into PML indexes.
    const size_t MASK = (1<<TRANSBITS)-1;
    size_t pmlchildid[TOPPMLLEVEL + 1];
    for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
    {
        pmlchildid[i] = mapto >> (12 + (i-1) * TRANSBITS) & MASK;
    }

    // For each PML level, make sure it exists.
    size_t offset = 0;
    for ( size_t i = TOPPMLLEVEL; i > 1; i-- )
    {
        size_t childid = pmlchildid[i];
        PML* pml = PMLS[i] + offset;

        addr_t& entry = pml->entry[childid];

        if ( !(entry & PML_PRESENT) )
            PanicF("Attempted to unmap virtual page 0x%jX, but the "
                   "virtual page wasn't mapped. This is a bug in the "
                   "code calling this function", (uintmax_t) mapto);

        // Find the index of the next PML in the fractal mapped memory.
        offset = offset * ENTRIES + childid;
    }

    addr_t& entry = (PMLS[1] + offset)->entry[pmlchildid[1]];
    addr_t result = entry & PML_ADDRESS;
    entry = 0;

    // TODO: If all the entries in PML[N] are not-present, then who
    // unmaps its entry from PML[N-1]?

    return result;
}

bool MapPAT(addr_t physical, addr_t mapto, int prot, addr_t mtype)
{
    addr_t extraflags = PAT2PMLFlags[mtype];
    return MapInternal(physical, mapto, prot, extraflags);
}

void ForkCleanup(size_t i, size_t level)
{
    PML* destpml = FORKPML + level;
    if ( !i )
        return;
    for ( size_t n = 0; n < i-1; n++ )
    {
        addr_t entry = destpml->entry[n];
        if ( !(entry & PML_FORK ) )
            continue;
        addr_t phys = entry & PML_ADDRESS;
        if ( 1 < level )
        {
            addr_t destaddr = (addr_t) (FORKPML + level-1);
            Map(phys, destaddr, PROT_KREAD | PROT_KWRITE);
            InvalidatePage(destaddr);
            ForkCleanup(ENTRIES+1UL, level-1);
        }
        Page::Put(phys);
    }
}

// TODO: Copying every frame is needlessly wasteful in many cases. It'd
// be nice to upgrade this to a copy-on-write algorithm.
bool Fork(size_t level, size_t pmloffset)
{
    PML* destpml = FORKPML + level;
    for ( size_t i = 0; i < ENTRIES; i++ )
    {
        addr_t entry = (PMLS[level] + pmloffset)->entry[i];

        // Link the entry if it isn't supposed to be forked.
        if ( !(entry & PML_FORK ) )
        {
            destpml->entry[i] = entry;
            continue;
        }

        addr_t phys = Page::Get();
        if ( unlikely(!phys) )
        {
            ForkCleanup(i, level);
            return false;
        }

        addr_t flags = entry & PML_FLAGS;
        destpml->entry[i] = phys | flags;

        // Map the destination page.
        addr_t destaddr = (addr_t) (FORKPML + level-1);
        Map(phys, destaddr, PROT_KREAD | PROT_KWRITE);
        InvalidatePage(destaddr);

        size_t offset = pmloffset * ENTRIES + i;

        if ( 1 < level )
        {
            if ( !Fork(level-1, offset) )
            {
                Page::Put(phys);
                ForkCleanup(i, level);
                return false;
            }
            continue;
        }

        // Determine the source page's address.
        const void* src = (const void*) (offset * 4096UL);

        // Determine the destination page's address.
        void* dest = (void*) (FORKPML + level - 1);

        memcpy(dest, src, 4096UL);
    }

    return true;
}

bool Fork(addr_t dir, size_t level, size_t pmloffset)
{
    PML* destpml = FORKPML + level;

    // This call always succeeds.
    Map(dir, (addr_t) destpml, PROT_KREAD | PROT_KWRITE);
    InvalidatePage((addr_t) destpml);

    return Fork(level, pmloffset);
}
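
// Fork() below copies the entire table hierarchy and then patches the
// copy's fractal mapping: the last entry at every level of the new
// hierarchy is pointed back at the new top directory, so that once the
// child address space is switched to, the FORKPML/PMLS windows resolve
// to the child's own tables instead of the parent's.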

// Create an exact copy of the current address space.
addr_t Fork()
{
    addr_t dir = Page::Get();
    if ( dir == 0 )
        return 0;
    if ( !Fork(dir, TOPPMLLEVEL, 0) )
    {
        Page::Put(dir);
        return 0;
    }

    // Now, the new top pml needs to have its fractal memory fixed.
    const addr_t flags = PML_PRESENT | PML_WRITABLE;
    addr_t mapto;
    addr_t childaddr;

    (FORKPML + TOPPMLLEVEL)->entry[ENTRIES-1] = dir | flags;
    childaddr = (FORKPML + TOPPMLLEVEL)->entry[ENTRIES-2] & PML_ADDRESS;

    for ( size_t i = TOPPMLLEVEL-1; i > 0; i-- )
    {
        mapto = (addr_t) (FORKPML + i);
        Map(childaddr, mapto, PROT_KREAD | PROT_KWRITE);
        InvalidatePage(mapto);
        (FORKPML + i)->entry[ENTRIES-1] = dir | flags;
        childaddr = (FORKPML + i)->entry[ENTRIES-2] & PML_ADDRESS;
    }
    return dir;
}

} // namespace Memory
} // namespace Sortix