/*******************************************************************************

	Copyright(C) Jonas 'Sortie' Termansen 2011, 2012.

	This file is part of Sortix.

	Sortix is free software: you can redistribute it and/or modify it under the
	terms of the GNU General Public License as published by the Free Software
	Foundation, either version 3 of the License, or (at your option) any later
	version.

	Sortix is distributed in the hope that it will be useful, but WITHOUT ANY
	WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
	FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
	details.

	You should have received a copy of the GNU General Public License along with
	Sortix. If not, see <http://www.gnu.org/licenses/>.

	memorymanagement.cpp
	Handles memory for the x86 family of architectures.

*******************************************************************************/

#include <sortix/kernel/platform.h>
#include <libmaxsi/error.h>
#include <libmaxsi/memory.h>
#include "multiboot.h"
#include <sortix/kernel/panic.h>
#include <sortix/mman.h>
#include <sortix/kernel/memorymanagement.h>
#include "memorymanagement.h"
#include "syscall.h"
#include "msr.h"

using namespace Maxsi;

namespace Sortix
{
	extern size_t end;

	namespace Page
	{
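		// The physical frame allocator is a stack of page-aligned physical
		// addresses kept in virtual memory at STACK. stackused counts the
		// entries currently on it, stackreserved counts entries promised to
		// callers through Reserve, and stacklength is the stack's current
		// capacity in entries.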
		void InitPushRegion(addr_t position, size_t length);
		size_t pagesnotonstack;
		size_t stackused;
		size_t stackreserved;
		size_t stacklength;
		size_t totalmem;
	}

	namespace Memory
	{
		addr_t currentdir = 0;

		void InitCPU();
		void AllocateKernelPMLs();
		int SysMemStat(size_t* memused, size_t* memtotal);
		addr_t PAT2PMLFlags[PAT_NUM];

		void InitCPU(multiboot_info_t* bootinfo)
		{
			const size_t MAXKERNELEND = 0x400000UL; /* 4 MiB */
			addr_t kernelend = Page::AlignUp((addr_t) &end);
			if ( MAXKERNELEND < kernelend )
			{
				Log::PrintF("Warning: The kernel is too big! It ends at 0x%zx, "
				            "but the highest ending address supported is 0x%zx. "
				            "The system may not boot correctly.\n", kernelend,
				            MAXKERNELEND);
			}

			Page::stackreserved = 0;
			Page::pagesnotonstack = 0;
			Page::totalmem = 0;

			if ( !( bootinfo->flags & MULTIBOOT_INFO_MEM_MAP ) )
			{
				Panic("memorymanagement.cpp: The memory map flag wasn't set in "
				      "the multiboot structure. Is your bootloader multiboot "
				      "specification compliant?");
			}

			// If supported, set up the Page Attribute Table feature that
			// allows us to control the memory type (caching) of memory more
			// precisely.
			if ( MSR::IsPATSupported() )
			{
				MSR::InitializePAT();
				for ( addr_t i = 0; i < PAT_NUM; i++ )
					PAT2PMLFlags[i] = EncodePATAsPMLFlag(i);
			}
			// Otherwise, reroute all requests to the backwards compatible
			// scheme. TODO: Not all early 32-bit x86 CPUs support these
			// values, so we need yet another fallback.
			else
			{
				PAT2PMLFlags[PAT_UC] = PML_WRTHROUGH | PML_NOCACHE;
				PAT2PMLFlags[PAT_WC] = PML_WRTHROUGH | PML_NOCACHE; // Approx.
				PAT2PMLFlags[2] = 0; // No such flag.
				PAT2PMLFlags[3] = 0; // No such flag.
				PAT2PMLFlags[PAT_WT] = PML_WRTHROUGH;
				PAT2PMLFlags[PAT_WP] = PML_WRTHROUGH; // Approx.
				PAT2PMLFlags[PAT_WB] = 0;
				PAT2PMLFlags[PAT_UCM] = PML_NOCACHE;
			}
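
			// Without PAT, only the PWT (write-through) and PCD (cache
			// disable) page bits are honored, which is why the fallback
			// table above can only approximate the WC and WP memory types.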

			// Initialize CPU-specific things.
			InitCPU();

			typedef const multiboot_memory_map_t* mmap_t;
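
			// Each multiboot memory map entry is prefixed by a 32-bit size
			// field that doesn't count itself, hence the loop below advances
			// by mmap->size + sizeof(mmap->size) bytes.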

			// Loop over every detected memory region.
			for (
				mmap_t mmap = (mmap_t) (addr_t) bootinfo->mmap_addr;
				(addr_t) mmap < bootinfo->mmap_addr + bootinfo->mmap_length;
				mmap = (mmap_t) ((addr_t) mmap + mmap->size + sizeof(mmap->size))
			)
			{
				// Check that we can use this kind of RAM.
				if ( mmap->type != 1 ) { continue; }

				// The kernel's code may split this memory area into multiple pieces.
				addr_t base = (addr_t) mmap->addr;
				size_t length = Page::AlignDown(mmap->len);

#ifdef PLATFORM_X86
				// Figure out if the memory area is addressable (are our pointers big enough?)
				if ( 0xFFFFFFFFULL < mmap->addr ) { continue; }
				if ( 0xFFFFFFFFULL < mmap->addr + mmap->len ) { length = 0x100000000ULL - mmap->addr; }
#endif

				// Count the amount of usable RAM (even if reserved for kernel).
				Page::totalmem += length;

				// Give all the physical memory to the physical memory allocator
				// but make sure not to give it things we already use.
				addr_t regionstart = mmap->addr;
				addr_t regionend = mmap->addr + mmap->len;
				addr_t processed = regionstart;
				while ( processed < regionend )
				{
					addr_t lowest = processed;
					addr_t highest = regionend;

					// Don't allocate the kernel.
					if ( lowest < kernelend ) { processed = kernelend; continue; }

					// Don't give any of our modules to the physical page
					// allocator, we'll need them.
					bool continuing = false;
					uint32_t* modules = (uint32_t*) (addr_t) bootinfo->mods_addr;
					for ( uint32_t i = 0; i < bootinfo->mods_count; i++ )
					{
						size_t modsize = (size_t) (modules[2*i+1] - modules[2*i+0]);
						addr_t modstart = (addr_t) modules[2*i+0];
						addr_t modend = modstart + modsize;
						if ( modstart <= processed && processed < modend )
						{
							processed = modend;
							continuing = true;
							break;
						}
						if ( lowest <= modstart && modstart < highest )
						{
							highest = modstart;
						}
					}

					if ( continuing ) { continue; }

					if ( highest <= lowest ) { break; }

					// Now that we have a contiguous area not used by anything,
					// let's forward it to the physical page allocator.
					lowest = Page::AlignUp(lowest);
					highest = Page::AlignUp(highest);
					size_t size = highest - lowest;
					Page::InitPushRegion(lowest, size);
					processed = highest;
				}
			}

			// If the physical allocator couldn't handle the vast amount of
			// physical pages, it may decide to drop some. This shouldn't happen
			// until the pebibyte era of RAM.
			if ( 0 < Page::pagesnotonstack )
			{
				Log::PrintF("%zu bytes of RAM aren't used due to technical "
				            "restrictions.\n", Page::pagesnotonstack * 0x1000UL);
			}

			// Finish allocating the top level PMLs for the kernel's use.
			AllocateKernelPMLs();
		}

		void Statistics(size_t* amountused, size_t* totalmem)
		{
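			// Every free page is one entry on the page stack, so the amount
			// of free memory is the number of unreserved entries times the
			// page size (4096 = 1 << 12, hence the shift).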
			size_t memfree = (Page::stackused - Page::stackreserved) << 12UL;
			size_t memused = Page::totalmem - memfree;
			if ( amountused ) { *amountused = memused; }
			if ( totalmem ) { *totalmem = Page::totalmem; }
		}

		// Prepare the non-forkable kernel PMLs such that forking the kernel
		// address space will always keep the kernel mapped.
		void AllocateKernelPMLs()
		{
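			// The top level PML entries from ENTRIES/2 and up cover the
			// kernel half of the address space. Allocating them all here
			// lets every later address space share the kernel's page tables.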
			const addr_t flags = PML_PRESENT | PML_WRITABLE;

			PML* const pml = PMLS[TOPPMLLEVEL];

			size_t start = ENTRIES / 2;
			size_t end = ENTRIES;

			for ( size_t i = start; i < end; i++ )
			{
				if ( pml->entry[i] & PML_PRESENT ) { continue; }

				addr_t page = Page::Get();
				if ( !page ) { Panic("out of memory allocating boot PMLs"); }

				pml->entry[i] = page | flags;

				// Invalidate the new PML and reset it to zeroes.
				addr_t pmladdr = (addr_t) (PMLS[TOPPMLLEVEL-1] + i);
				InvalidatePage(pmladdr);
				Maxsi::Memory::Set((void*) pmladdr, 0, sizeof(PML));
			}
		}
	}

	namespace Page
	{
		void ExtendStack()
		{
			// This call will always succeed: if it couldn't, the stack
			// wouldn't be full, and this function wouldn't have been called.
			addr_t page = Get();

			// This call will also succeed, since there are plenty of
			// physical pages available in case it needs any.
			addr_t virt = (addr_t) (STACK + stacklength);
			if ( !Memory::Map(page, virt, PROT_KREAD | PROT_KWRITE) )
			{
				Panic("Unable to extend page stack, which should have worked");
			}

			// TODO: This may not be needed during the boot process!
			//Memory::InvalidatePage((addr_t) (STACK + stacklength));
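
			// A page of stack space holds 4096 / sizeof(addr_t) new entries
			// (1024 on 32-bit x86).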
			stacklength += 4096UL / sizeof(addr_t);
		}

		void InitPushRegion(addr_t position, size_t length)
		{
			// Align our entries on page boundaries.
			addr_t newposition = Page::AlignUp(position);
			length = Page::AlignDown((position + length) - newposition);
			position = newposition;

			while ( length )
			{
				if ( unlikely(stackused == stacklength) )
				{
					if ( stackused == MAXSTACKLENGTH )
					{
						pagesnotonstack += length / 4096UL;
						return;
					}

					ExtendStack();
				}

				addr_t* stackentry = &(STACK[stackused++]);
				*stackentry = position;

				length -= 4096UL;
				position += 4096UL;
			}
		}
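
		// Reserve sets aside between least and ideal pages for the caller:
		// reserved pages can no longer be handed out by Get and are later
		// claimed one by one through GetReserved, which therefore cannot
		// fail.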
		bool Reserve(size_t* counter, size_t least, size_t ideal)
		{
			ASSERT(least <= ideal);
			size_t available = stackused - stackreserved;
			if ( available < least ) { Error::Set(ENOMEM); return false; }
			if ( available < ideal ) { ideal = available; }
			stackreserved += ideal;
			*counter += ideal;
			return true;
		}

		bool Reserve(size_t* counter, size_t amount)
		{
			return Reserve(counter, amount, amount);
		}

		addr_t GetReserved(size_t* counter)
		{
			if ( !*counter ) { return 0; }
			ASSERT(stackused); // After all, we did _reserve_ the memory.
			addr_t result = STACK[--stackused];
			ASSERT(result == AlignDown(result));
			stackreserved--;
			(*counter)--;
			return result;
		}

		addr_t Get()
		{
			ASSERT(stackreserved <= stackused);
			if ( unlikely(stackreserved == stackused) )
			{
				Error::Set(ENOMEM);
				return 0;
			}
			addr_t result = STACK[--stackused];
			ASSERT(result == AlignDown(result));
			return result;
		}

		void Put(addr_t page)
		{
			ASSERT(page == AlignDown(page));
			ASSERT(stackused < MAXSTACKLENGTH);
			STACK[stackused++] = page;
		}
	}

	namespace Memory
	{
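		// Plain x86 page tables cannot express every combination of the
		// mman.h flags: there is no execute bit here and user pages are
		// always readable by the kernel, so several PROT_* flags collapse
		// onto the same PML bits below. ProvidedProtection reports which
		// protection is actually granted for a requested one.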
		addr_t ProtectionToPMLFlags(int prot)
		{
			addr_t result = 0;
			if ( prot & PROT_EXEC ) { result |= PML_USERSPACE; }
			if ( prot & PROT_READ ) { result |= PML_USERSPACE; }
			if ( prot & PROT_WRITE ) { result |= PML_USERSPACE | PML_WRITABLE; }
			if ( prot & PROT_KEXEC ) { result |= 0; }
			if ( prot & PROT_KREAD ) { result |= 0; }
			if ( prot & PROT_KWRITE ) { result |= PML_WRITABLE; }
			if ( prot & PROT_FORK ) { result |= PML_FORK; }
			return result;
		}

		int PMLFlagsToProtection(addr_t flags)
		{
			int prot = PROT_KREAD | PROT_KEXEC;
			bool user = flags & PML_USERSPACE;
			bool write = flags & PML_WRITABLE;
			if ( user ) { prot |= PROT_EXEC | PROT_READ; }
			if ( user && write ) { prot |= PROT_WRITE; }
			return prot;
		}

		int ProvidedProtection(int prot)
		{
			addr_t flags = ProtectionToPMLFlags(prot);
			return PMLFlagsToProtection(flags);
		}

		bool LookUp(addr_t mapto, addr_t* physical, int* protection)
		{
			// Translate the virtual address into PML indexes.
			const size_t MASK = (1<<TRANSBITS)-1;
			size_t pmlchildid[TOPPMLLEVEL + 1];
			for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
			{
				pmlchildid[i] = (mapto >> (12+(i-1)*TRANSBITS)) & MASK;
			}
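
			// On 32-bit x86, for instance, TOPPMLLEVEL is presumably 2 and
			// TRANSBITS 10, making pmlchildid[2] bits 22-31 of the address
			// (the page directory index) and pmlchildid[1] bits 12-21 (the
			// page table index).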

			int prot = PROT_USER | PROT_KERNEL | PROT_FORK;
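
			// The tables are reached through the fractal (recursive) page
			// table mapping: PMLS[i] is a fixed virtual window onto the
			// level-i tables, and offset = offset * ENTRIES + childid steps
			// into the child's slot within that window on each iteration.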

			// For each PML level, make sure it exists.
			size_t offset = 0;
			for ( size_t i = TOPPMLLEVEL; i > 1; i-- )
			{
				size_t childid = pmlchildid[i];
				PML* pml = PMLS[i] + offset;

				addr_t entry = pml->entry[childid];
				if ( !(entry & PML_PRESENT) ) { return false; }
				addr_t entryflags = entry & PML_FLAGS;
				int entryprot = PMLFlagsToProtection(entryflags);
				prot &= entryprot;

				// Find the index of the next PML in the fractal mapped memory.
				offset = offset * ENTRIES + childid;
			}

			addr_t entry = (PMLS[1] + offset)->entry[pmlchildid[1]];
			if ( !(entry & PML_PRESENT) ) { return false; }
			addr_t entryflags = entry & PML_FLAGS;
			int entryprot = PMLFlagsToProtection(entryflags);
			prot &= entryprot;
			addr_t phys = entry & PML_ADDRESS;

			if ( physical ) { *physical = phys; }
			if ( protection ) { *protection = prot; }

			return true;
		}

		void InvalidatePage(addr_t /*addr*/)
		{
			// TODO: Use the invlpg instruction on the given address instead
			// of flushing the entire TLB.
			Flush();
		}

		// Flushes the Translation Lookaside Buffer (TLB).
		void Flush()
		{
			asm volatile("mov %0, %%cr3":: "r"(currentdir));
		}

		addr_t GetAddressSpace()
		{
			return currentdir;
		}

		addr_t SwitchAddressSpace(addr_t addrspace)
		{
			// Have fun debugging this.
			if ( currentdir != Page::AlignDown(currentdir) )
			{
				PanicF("The variable containing the current address space "
				       "contains garbage all of a sudden: it isn't page-aligned. "
				       "It contains the value 0x%zx.", currentdir);
			}

			// Don't switch if we are already there.
			if ( addrspace == currentdir ) { return currentdir; }

			if ( addrspace & 0xFFFUL ) { PanicF("addrspace 0x%zx was not page-aligned!", addrspace); }

			addr_t previous = currentdir;

			// Switch and flush the TLB.
			asm volatile("mov %0, %%cr3":: "r"(addrspace));

			currentdir = addrspace;

			return previous;
		}

		bool MapRange(addr_t where, size_t bytes, int protection)
		{
			for ( addr_t page = where; page < where + bytes; page += 4096UL )
			{
				addr_t physicalpage = Page::Get();
				if ( physicalpage == 0 )
				{
					while ( where < page )
					{
						page -= 4096UL;
						physicalpage = Unmap(page);
						Page::Put(physicalpage);
					}
					return false;
				}

				Map(physicalpage, page, protection);
			}

			return true;
		}

		bool UnmapRange(addr_t where, size_t bytes)
		{
			for ( addr_t page = where; page < where + bytes; page += 4096UL )
			{
				addr_t physicalpage = Unmap(page);
				Page::Put(physicalpage);
			}
			return true;
		}

		static bool MapInternal(addr_t physical, addr_t mapto, int prot, addr_t extraflags = 0)
		{
			addr_t flags = ProtectionToPMLFlags(prot) | PML_PRESENT;

			// Translate the virtual address into PML indexes.
			const size_t MASK = (1<<TRANSBITS)-1;
			size_t pmlchildid[TOPPMLLEVEL + 1];
			for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
			{
				pmlchildid[i] = (mapto >> (12+(i-1)*TRANSBITS)) & MASK;
			}

			// For each PML level, make sure it exists.
			size_t offset = 0;
			for ( size_t i = TOPPMLLEVEL; i > 1; i-- )
			{
				size_t childid = pmlchildid[i];
				PML* pml = PMLS[i] + offset;

				addr_t& entry = pml->entry[childid];

				// Find the index of the next PML in the fractal mapped memory.
				size_t childoffset = offset * ENTRIES + childid;

				if ( !(entry & PML_PRESENT) )
				{
					// TODO: Possible memory leak when page allocation fails.
					addr_t page = Page::Get();

					if ( !page ) { return false; }
					addr_t pmlflags = PML_PRESENT | PML_WRITABLE | PML_USERSPACE
					                | PML_FORK;
					entry = page | pmlflags;
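
					// The intermediate levels stay fully permissive; x86
					// combines the user and write bits across every level,
					// so the real protection is enforced by the leaf entry.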

					// Invalidate the new PML and reset it to zeroes.
					addr_t pmladdr = (addr_t) (PMLS[i-1] + childoffset);
					InvalidatePage(pmladdr);
					Maxsi::Memory::Set((void*) pmladdr, 0, sizeof(PML));
				}

				offset = childoffset;
			}

			// Actually map the physical page to the virtual page.
			const addr_t entry = physical | flags | extraflags;
			(PMLS[1] + offset)->entry[pmlchildid[1]] = entry;
			return true;
		}

		bool Map(addr_t physical, addr_t mapto, int prot)
		{
			return MapInternal(physical, mapto, prot);
		}
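
		// Note that the following helpers assume mapto is already mapped:
		// the return value of LookUp isn't checked.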
		void PageProtect(addr_t mapto, int protection)
		{
			addr_t phys;
			LookUp(mapto, &phys, NULL);
			Map(phys, mapto, protection);
		}

		void PageProtectAdd(addr_t mapto, int protection)
		{
			addr_t phys;
			int prot;
			LookUp(mapto, &phys, &prot);
			prot |= protection;
			Map(phys, mapto, prot);
		}

		void PageProtectSub(addr_t mapto, int protection)
		{
			addr_t phys;
			int prot;
			LookUp(mapto, &phys, &prot);
			prot &= ~protection;
			Map(phys, mapto, prot);
		}

		addr_t Unmap(addr_t mapto)
		{
			// Translate the virtual address into PML indexes.
			const size_t MASK = (1<<TRANSBITS)-1;
			size_t pmlchildid[TOPPMLLEVEL + 1];
			for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
			{
				pmlchildid[i] = (mapto >> (12+(i-1)*TRANSBITS)) & MASK;
			}

			// For each PML level, make sure it exists.
			size_t offset = 0;
			for ( size_t i = TOPPMLLEVEL; i > 1; i-- )
			{
				size_t childid = pmlchildid[i];
				PML* pml = PMLS[i] + offset;

				addr_t& entry = pml->entry[childid];

				if ( !(entry & PML_PRESENT) )
				{
					PanicF("Attempted to unmap virtual page %p, but the "
					       "virtual page wasn't mapped. This is a bug in "
					       "the code calling this function.", mapto);
				}

				// Find the index of the next PML in the fractal mapped memory.
				offset = offset * ENTRIES + childid;
			}

			addr_t& entry = (PMLS[1] + offset)->entry[pmlchildid[1]];
			addr_t result = entry & PML_ADDRESS;
			entry = 0;

			// TODO: If all the entries in PML[N] are not-present, then who
			// unmaps its entry from PML[N-1]?

			return result;
		}

		bool MapPAT(addr_t physical, addr_t mapto, int prot, addr_t mtype)
		{
			addr_t extraflags = PAT2PMLFlags[mtype];
			return MapInternal(physical, mapto, prot, extraflags);
		}

		void ForkCleanup(size_t i, size_t level)
		{
			PML* destpml = FORKPML + level;
			if ( !i ) { return; }
			for ( size_t n = 0; n < i-1; n++ )
			{
				addr_t entry = destpml->entry[n];
				if ( !(entry & PML_FORK ) ) { continue; }
				addr_t phys = entry & PML_ADDRESS;
				if ( 1 < level )
				{
					addr_t destaddr = (addr_t) (FORKPML + level-1);
					Map(phys, destaddr, PROT_KREAD | PROT_KWRITE);
					InvalidatePage(destaddr);
					ForkCleanup(ENTRIES+1UL, level-1);
				}
				Page::Put(phys);
			}
		}

		// TODO: Copying every frame is needlessly expensive in many cases.
		// It'd be nice to upgrade this to a copy-on-write algorithm.
		bool Fork(size_t level, size_t pmloffset)
		{
			PML* destpml = FORKPML + level;
			for ( size_t i = 0; i < ENTRIES; i++ )
			{
				addr_t entry = (PMLS[level] + pmloffset)->entry[i];

				// Link the entry if it isn't supposed to be forked.
				if ( !(entry & PML_FORK ) )
				{
					destpml->entry[i] = entry;
					continue;
				}

				addr_t phys = Page::Get();
				if ( unlikely(!phys) ) { ForkCleanup(i, level); return false; }

				addr_t flags = entry & PML_FLAGS;
				destpml->entry[i] = phys | flags;

				// Map the destination page.
				addr_t destaddr = (addr_t) (FORKPML + level-1);
				Map(phys, destaddr, PROT_KREAD | PROT_KWRITE);
				InvalidatePage(destaddr);

				size_t offset = pmloffset * ENTRIES + i;

				if ( 1 < level )
				{
					if ( !Fork(level-1, offset) )
					{
						Page::Put(phys);
						ForkCleanup(i, level);
						return false;
					}
					continue;
				}
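
				// At level 1 the accumulated offset is exactly the virtual
				// page number, so offset * 4096 is the page's virtual
				// address in the current address space.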

				// Determine the source page's address.
				const void* src = (const void*) (offset * 4096UL);

				// Determine the destination page's address.
				void* dest = (void*) (FORKPML + level - 1);

				Maxsi::Memory::Copy(dest, src, 4096UL);
			}

			return true;
		}

		bool Fork(addr_t dir, size_t level, size_t pmloffset)
		{
			PML* destpml = FORKPML + level;

			// This call always succeeds.
			Map(dir, (addr_t) destpml, PROT_KREAD | PROT_KWRITE);
			InvalidatePage((addr_t) destpml);

			return Fork(level, pmloffset);
		}

		// Create an exact copy of the current address space.
		addr_t Fork()
		{
			addr_t dir = Page::Get();
			if ( dir == 0 ) { return 0; }
			if ( !Fork(dir, TOPPMLLEVEL, 0) ) { Page::Put(dir); return 0; }

			// Now, the new top pml needs to have its fractal memory fixed.
			const addr_t flags = PML_PRESENT | PML_WRITABLE;
			addr_t mapto;
			addr_t childaddr;
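
			// The last entry of each level in the fork window must point
			// back into the new directory chain so the new address space's
			// own fractal mapping works once it is switched to.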

			(FORKPML + TOPPMLLEVEL)->entry[ENTRIES-1] = dir | flags;
			childaddr = (FORKPML + TOPPMLLEVEL)->entry[ENTRIES-2] & PML_ADDRESS;

			for ( size_t i = TOPPMLLEVEL-1; i > 0; i-- )
			{
				mapto = (addr_t) (FORKPML + i);
				Map(childaddr, mapto, PROT_KREAD | PROT_KWRITE);
				InvalidatePage(mapto);

				(FORKPML + i)->entry[ENTRIES-1] = dir | flags;
				childaddr = (FORKPML + i)->entry[ENTRIES-2] & PML_ADDRESS;
			}
			return dir;
		}
	}
}