diff --git a/kernel/include/sortix/kernel/memorymanagement.h b/kernel/include/sortix/kernel/memorymanagement.h index 8bd0e232..7970347d 100644 --- a/kernel/include/sortix/kernel/memorymanagement.h +++ b/kernel/include/sortix/kernel/memorymanagement.h @@ -1,6 +1,6 @@ /******************************************************************************* - Copyright(C) Jonas 'Sortie' Termansen 2011, 2012. + Copyright(C) Jonas 'Sortie' Termansen 2011, 2012, 2014. This file is part of Sortix. @@ -22,74 +22,80 @@ *******************************************************************************/ -#ifndef SORTIX_MEMORYMANAGEMENT_H -#define SORTIX_MEMORYMANAGEMENT_H +#ifndef INCLUDE_SORTIX_KERNEL_MEMORYMANAGEMENT_H +#define INCLUDE_SORTIX_KERNEL_MEMORYMANAGEMENT_H -// Forward declarations. typedef struct multiboot_info multiboot_info_t; -namespace Sortix -{ - class Process; +namespace Sortix { - namespace Page - { - bool Reserve(size_t* counter, size_t amount); - bool ReserveUnlocked(size_t* counter, size_t amount); - bool Reserve(size_t* counter, size_t least, size_t ideal); - bool ReserveUnlocked(size_t* counter, size_t least, size_t ideal); - addr_t GetReserved(size_t* counter); - addr_t GetReservedUnlocked(size_t* counter); - addr_t Get(); - addr_t GetUnlocked(); - void Put(addr_t page); - void PutUnlocked(addr_t page); - void Lock(); - void Unlock(); +class Process; - inline size_t Size() { return 4096UL; } +} // namespace Sortix - // Rounds a memory address down to nearest page. - inline addr_t AlignDown(addr_t page) { return page & ~(0xFFFUL); } +namespace Sortix { +namespace Page { - // Rounds a memory address up to nearest page. - inline addr_t AlignUp(addr_t page) { return AlignDown(page + 0xFFFUL); } +bool Reserve(size_t* counter, size_t amount); +bool ReserveUnlocked(size_t* counter, size_t amount); +bool Reserve(size_t* counter, size_t least, size_t ideal); +bool ReserveUnlocked(size_t* counter, size_t least, size_t ideal); +addr_t GetReserved(size_t* counter); +addr_t GetReservedUnlocked(size_t* counter); +addr_t Get(); +addr_t GetUnlocked(); +void Put(addr_t page); +void PutUnlocked(addr_t page); +void Lock(); +void Unlock(); - // Tests whether an address is page aligned. 
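// A minimal sanity sketch of the Page::AlignDown/AlignUp/IsAligned
// helpers in this header, assuming the fixed 4096-byte page size used
// throughout. The function name is hypothetical, exists only to show
// the intended behavior, and needs <assert.h>.
static inline void SketchAlignmentChecks()
{
	assert(Sortix::Page::AlignDown(0x1234UL) == 0x1000UL); // Clears the low 12 bits.
	assert(Sortix::Page::AlignUp(0x1234UL) == 0x2000UL);   // Rounds up to the next page.
	assert(Sortix::Page::AlignUp(0x2000UL) == 0x2000UL);   // Already aligned stays put.
	assert(Sortix::Page::IsAligned(0x2000UL));
	assert(!Sortix::Page::IsAligned(0x2001UL));
}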
- inline bool IsAligned(addr_t page) { return AlignDown(page) == page; } - } +inline size_t Size() { return 4096UL; } - namespace Memory - { - void Init(multiboot_info_t* bootinfo); - void InvalidatePage(addr_t addr); - void Flush(); - addr_t Fork(); - addr_t GetAddressSpace(); - addr_t SwitchAddressSpace(addr_t addrspace); - void DestroyAddressSpace(addr_t fallback = 0, - void (*func)(addr_t, void*) = NULL, - void* user = NULL); - bool Map(addr_t physical, addr_t mapto, int prot); - addr_t Unmap(addr_t mapto); - addr_t Physical(addr_t mapto); - int PageProtection(addr_t mapto); - bool LookUp(addr_t mapto, addr_t* physical, int* prot); - int ProvidedProtection(int prot); - void PageProtect(addr_t mapto, int protection); - void PageProtectAdd(addr_t mapto, int protection); - void PageProtectSub(addr_t mapto, int protection); - bool MapRange(addr_t where, size_t bytes, int protection); - bool UnmapRange(addr_t where, size_t bytes); - void Statistics(size_t* amountused, size_t* totalmem); - addr_t GetKernelStack(); - size_t GetKernelStackSize(); - void GetKernelVirtualArea(addr_t* from, size_t* size); - void GetUserVirtualArea(uintptr_t* from, size_t* size); - void UnmapMemory(Process* process, uintptr_t addr, size_t size); - bool ProtectMemory(Process* process, uintptr_t addr, size_t size, int prot); - bool MapMemory(Process* process, uintptr_t addr, size_t size, int prot); - } -} +// Rounds a memory address down to nearest page. +inline addr_t AlignDown(addr_t page) { return page & ~(0xFFFUL); } + +// Rounds a memory address up to nearest page. +inline addr_t AlignUp(addr_t page) { return AlignDown(page + 0xFFFUL); } + +// Tests whether an address is page aligned. +inline bool IsAligned(addr_t page) { return AlignDown(page) == page; } + +} // namespace Page +} // namespace Sortix + +namespace Sortix { +namespace Memory { + +void Init(multiboot_info_t* bootinfo); +void InvalidatePage(addr_t addr); +void Flush(); +addr_t Fork(); +addr_t GetAddressSpace(); +addr_t SwitchAddressSpace(addr_t addrspace); +void DestroyAddressSpace(addr_t fallback = 0, + void (*func)(addr_t, void*) = NULL, + void* user = NULL); +bool Map(addr_t physical, addr_t mapto, int prot); +addr_t Unmap(addr_t mapto); +addr_t Physical(addr_t mapto); +int PageProtection(addr_t mapto); +bool LookUp(addr_t mapto, addr_t* physical, int* prot); +int ProvidedProtection(int prot); +void PageProtect(addr_t mapto, int protection); +void PageProtectAdd(addr_t mapto, int protection); +void PageProtectSub(addr_t mapto, int protection); +bool MapRange(addr_t where, size_t bytes, int protection); +bool UnmapRange(addr_t where, size_t bytes); +void Statistics(size_t* amountused, size_t* totalmem); +addr_t GetKernelStack(); +size_t GetKernelStackSize(); +void GetKernelVirtualArea(addr_t* from, size_t* size); +void GetUserVirtualArea(uintptr_t* from, size_t* size); +void UnmapMemory(Process* process, uintptr_t addr, size_t size); +bool ProtectMemory(Process* process, uintptr_t addr, size_t size, int prot); +bool MapMemory(Process* process, uintptr_t addr, size_t size, int prot); + +} // namespace Memory +} // namespace Sortix #endif diff --git a/kernel/x64/memorymanagement.cpp b/kernel/x64/memorymanagement.cpp index 89dbcd18..7a105f52 100644 --- a/kernel/x64/memorymanagement.cpp +++ b/kernel/x64/memorymanagement.cpp @@ -1,6 +1,6 @@ /******************************************************************************* - Copyright(C) Jonas 'Sortie' Termansen 2011, 2012. + Copyright(C) Jonas 'Sortie' Termansen 2011, 2012, 2014. 
This file is part of Sortix. @@ -31,181 +31,188 @@ #include "multiboot.h" #include "x86-family/memorymanagement.h" -namespace Sortix +namespace Sortix { +namespace Page { + +extern size_t stackused; +extern size_t stacklength; +void ExtendStack(); + +} // namespace Page +} // namespace Sortix + +namespace Sortix { +namespace Memory { + +extern addr_t currentdir; + +void InitCPU() { - namespace Page + // The x64 boot code already set up virtual memory and identity + // mapped the first 2 MiB. This code finishes the job such that + // virtual memory is fully usable and manageable. + + // boot.s already initialized everything from 0x1000UL to 0xE000UL + // to zeroes. Since these structures are already used, doing it here + // will be very dangerous. + + PML* const BOOTPML4 = (PML* const) 0x21000UL; + PML* const BOOTPML3 = (PML* const) 0x26000UL; + PML* const BOOTPML2 = (PML* const) 0x27000UL; + PML* const BOOTPML1 = (PML* const) 0x28000UL; + + // First order of business is to map the virtual memory structures + // to the pre-defined locations in the virtual address space. + addr_t flags = PML_PRESENT | PML_WRITABLE; + + // Fractal map the PML1s. + BOOTPML4->entry[511] = (addr_t) BOOTPML4 | flags; + + // Fractal map the PML2s. + BOOTPML4->entry[510] = (addr_t) BOOTPML3 | flags | PML_FORK; + BOOTPML3->entry[511] = (addr_t) BOOTPML4 | flags; + + // Fractal map the PML3s. + BOOTPML3->entry[510] = (addr_t) BOOTPML2 | flags | PML_FORK; + BOOTPML2->entry[511] = (addr_t) BOOTPML4 | flags; + + // Fractal map the PML4s. + BOOTPML2->entry[510] = (addr_t) BOOTPML1 | flags | PML_FORK; + BOOTPML1->entry[511] = (addr_t) BOOTPML4 | flags; + + // Add some predefined room for forking address spaces. + PML* const FORKPML2 = (PML* const) 0x29000UL; + PML* const FORKPML1 = (PML* const) 0x2A000UL; + + BOOTPML3->entry[0] = (addr_t) FORKPML2 | flags | PML_FORK; + BOOTPML2->entry[0] = (addr_t) FORKPML1 | flags | PML_FORK; + + currentdir = (addr_t) BOOTPML4; + + // The virtual memory structures are now available on the predefined + // locations. This means the virtual memory code is bootstrapped. Of + // course, we still have no physical page allocator, so that's the + // next step. + + PML* const PHYSPML3 = (PML* const) 0x2B000UL; + PML* const PHYSPML2 = (PML* const) 0x2C000UL; + PML* const PHYSPML1 = (PML* const) 0x2D000UL; + PML* const PHYSPML0 = (PML* const) 0x2E000UL; + + BOOTPML4->entry[509] = (addr_t) PHYSPML3 | flags; + PHYSPML3->entry[0] = (addr_t) PHYSPML2 | flags; + PHYSPML2->entry[0] = (addr_t) PHYSPML1 | flags; + PHYSPML1->entry[0] = (addr_t) PHYSPML0 | flags; + + Page::stackused = 0; + Page::stacklength = 4096UL / sizeof(addr_t); + + // The physical memory allocator should now be ready for use. Next + // up, the calling function will fill up the physical allocator with + // plenty of nice physical pages. (see Page::InitPushRegion) +} + +// Please note that even if this function exists, you should still clean +// up the address space of a process _before_ calling +// DestroyAddressSpace. This is just a hack because it currently is +// impossible to clean up PLM1's using the MM api! +// --- +// TODO: This function is duplicated in {x86,x64}/memorymanagement.cpp! 
+// --- +void RecursiveFreeUserspacePages(size_t level, size_t offset) +{ + PML* pml = PMLS[level] + offset; + for ( size_t i = 0; i < ENTRIES; i++ ) { - extern size_t stackused; - extern size_t stacklength; - void ExtendStack(); - } - - namespace Memory - { - extern addr_t currentdir; - - void InitCPU() - { - // The x64 boot code already set up virtual memory and identity - // mapped the first 2 MiB. This code finishes the job such that - // virtual memory is fully usable and manageable. - - // boot.s already initialized everything from 0x1000UL to 0xE000UL - // to zeroes. Since these structures are already used, doing it here - // will be very dangerous. - - PML* const BOOTPML4 = (PML* const) 0x21000UL; - PML* const BOOTPML3 = (PML* const) 0x26000UL; - PML* const BOOTPML2 = (PML* const) 0x27000UL; - PML* const BOOTPML1 = (PML* const) 0x28000UL; - - // First order of business is to map the virtual memory structures - // to the pre-defined locations in the virtual address space. - addr_t flags = PML_PRESENT | PML_WRITABLE; - - // Fractal map the PML1s. - BOOTPML4->entry[511] = (addr_t) BOOTPML4 | flags; - - // Fractal map the PML2s. - BOOTPML4->entry[510] = (addr_t) BOOTPML3 | flags | PML_FORK; - BOOTPML3->entry[511] = (addr_t) BOOTPML4 | flags; - - // Fractal map the PML3s. - BOOTPML3->entry[510] = (addr_t) BOOTPML2 | flags | PML_FORK; - BOOTPML2->entry[511] = (addr_t) BOOTPML4 | flags; - - // Fractal map the PML4s. - BOOTPML2->entry[510] = (addr_t) BOOTPML1 | flags | PML_FORK; - BOOTPML1->entry[511] = (addr_t) BOOTPML4 | flags; - - // Add some predefined room for forking address spaces. - PML* const FORKPML2 = (PML* const) 0x29000UL; - PML* const FORKPML1 = (PML* const) 0x2A000UL; - - BOOTPML3->entry[0] = (addr_t) FORKPML2 | flags | PML_FORK; - BOOTPML2->entry[0] = (addr_t) FORKPML1 | flags | PML_FORK; - - currentdir = (addr_t) BOOTPML4; - - // The virtual memory structures are now available on the predefined - // locations. This means the virtual memory code is bootstrapped. Of - // course, we still have no physical page allocator, so that's the - // next step. - - PML* const PHYSPML3 = (PML* const) 0x2B000UL; - PML* const PHYSPML2 = (PML* const) 0x2C000UL; - PML* const PHYSPML1 = (PML* const) 0x2D000UL; - PML* const PHYSPML0 = (PML* const) 0x2E000UL; - - BOOTPML4->entry[509] = (addr_t) PHYSPML3 | flags; - PHYSPML3->entry[0] = (addr_t) PHYSPML2 | flags; - PHYSPML2->entry[0] = (addr_t) PHYSPML1 | flags; - PHYSPML1->entry[0] = (addr_t) PHYSPML0 | flags; - - Page::stackused = 0; - Page::stacklength = 4096UL / sizeof(addr_t); - - // The physical memory allocator should now be ready for use. Next - // up, the calling function will fill up the physical allocator with - // plenty of nice physical pages. (see Page::InitPushRegion) - } - - // Please note that even if this function exists, you should still clean - // up the address space of a process _before_ calling - // DestroyAddressSpace. This is just a hack because it currently is - // impossible to clean up PLM1's using the MM api! - // --- - // TODO: This function is duplicated in {x86,x64}/memorymanagement.cpp! 
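// A sketch of what the self-referencing entries installed by InitCPU
// above buy us, assuming 4-level paging, 9-bit indexes (TRANSBITS) and
// 4 KiB pages. Because BOOTPML4->entry[511] points back at the PML4
// itself, any canonical address whose top index is 511 walks the PML4
// twice, so the final 4 KiB frame the MMU reaches is a PML1 rather
// than ordinary memory; that is where the fixed PMLS[] windows in
// x64/memorymanagement.h come from. The helper below is hypothetical
// and only illustrates how the PML1 entry of a virtual address can be
// located through that window.
static inline addr_t SketchPml1EntryAddress(addr_t virt)
{
	// Keep only the 4 * 9 index bits so the sign-extension bits of a
	// canonical kernel address do not leak into the window offset.
	addr_t page_index = (virt >> 12) & ((1UL << (4 * 9)) - 1);
	return 0xFFFFFF8000000000UL /* PMLS[1] */ + page_index * sizeof(addr_t);
}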
- // --- - void RecursiveFreeUserspacePages(size_t level, size_t offset) - { - PML* pml = PMLS[level] + offset; - for ( size_t i = 0; i < ENTRIES; i++ ) - { - addr_t entry = pml->entry[i]; - if ( !(entry & PML_PRESENT) ) { continue; } - if ( !(entry & PML_USERSPACE) ) { continue; } - if ( !(entry & PML_FORK) ) { continue; } - if ( level > 1 ) { RecursiveFreeUserspacePages(level-1, offset * ENTRIES + i); } - addr_t addr = pml->entry[i] & PML_ADDRESS; - // No need to unmap the page, we just need to mark it as unused. - Page::PutUnlocked(addr); - } - } - - void DestroyAddressSpace(addr_t fallback, void (*func)(addr_t, void*), void* user) - { - // Look up the last few entries used for the fractal mapping. These - // cannot be unmapped as that would destroy the world. Instead, we - // will remember them, switch to another adress space, and safely - // mark them as unused. Also handling the forking related pages. - addr_t fractal3 = (PMLS[4] + 0)->entry[510UL]; - addr_t fork2 = (PMLS[3] + 510UL)->entry[0]; - addr_t fractal2 = (PMLS[3] + 510UL)->entry[510]; - addr_t fork1 = (PMLS[2] + 510UL * 512UL + 510UL)->entry[0]; - addr_t fractal1 = (PMLS[2] + 510UL * 512UL + 510UL)->entry[510]; - addr_t dir = currentdir; - - // We want to free the pages, but we are still using them ourselves, - // so lock the page allocation structure until we are done. - Page::Lock(); - - // In case any pages wasn't cleaned at this point. - // TODO: Page::Put calls may internally Page::Get and then reusing pages we are not done with just yet - RecursiveFreeUserspacePages(TOPPMLLEVEL, 0); - - // Switch to the address space from when the world was originally - // created. It should contain the kernel, the whole kernel, and - // nothing but the kernel. - PML* const BOOTPML4 = (PML* const) 0x21000UL; - if ( !fallback ) - fallback = (addr_t) BOOTPML4; - - if ( func ) - func(fallback, user); - else - SwitchAddressSpace(fallback); - - // Ok, now we got marked everything left behind as unused, we can - // now safely let another thread use the pages. - Page::Unlock(); - - // These are safe to free since we switched address space. - Page::Put(fractal3 & PML_ADDRESS); - Page::Put(fractal2 & PML_ADDRESS); - Page::Put(fractal1 & PML_ADDRESS); - Page::Put(fork2 & PML_ADDRESS); - Page::Put(fork1 & PML_ADDRESS); - Page::Put(dir & PML_ADDRESS); - } - - const size_t KERNEL_STACK_SIZE = 256UL * 1024UL; - const addr_t KERNEL_STACK_END = 0xFFFF800000001000UL; - const addr_t KERNEL_STACK_START = KERNEL_STACK_END + KERNEL_STACK_SIZE; - - const addr_t VIRTUAL_AREA_LOWER = KERNEL_STACK_START; - const addr_t VIRTUAL_AREA_UPPER = 0xFFFFFE8000000000UL; - - void GetKernelVirtualArea(addr_t* from, size_t* size) - { - *from = KERNEL_STACK_END; - *size = VIRTUAL_AREA_UPPER - VIRTUAL_AREA_LOWER; - } - - void GetUserVirtualArea(uintptr_t* from, size_t* size) - { - *from = 0x400000; // 4 MiB. - *size = 0x800000000000 - *from; // 128 TiB - 4 MiB. - } - - addr_t GetKernelStack() - { - return KERNEL_STACK_START; - } - - size_t GetKernelStackSize() - { - return KERNEL_STACK_SIZE; - } + addr_t entry = pml->entry[i]; + if ( !(entry & PML_PRESENT) ) + continue; + if ( !(entry & PML_USERSPACE) ) + continue; + if ( !(entry & PML_FORK) ) + continue; + if ( 1 < level ) + RecursiveFreeUserspacePages(level-1, offset * ENTRIES + i); + addr_t addr = pml->entry[i] & PML_ADDRESS; + // No need to unmap the page, we just need to mark it as unused. 
+ Page::PutUnlocked(addr); } } + +void DestroyAddressSpace(addr_t fallback, void (*func)(addr_t, void*), void* user) +{ + // Look up the last few entries used for the fractal mapping. These + // cannot be unmapped as that would destroy the world. Instead, we + // will remember them, switch to another adress space, and safely + // mark them as unused. Also handling the forking related pages. + addr_t fractal3 = (PMLS[4] + 0)->entry[510UL]; + addr_t fork2 = (PMLS[3] + 510UL)->entry[0]; + addr_t fractal2 = (PMLS[3] + 510UL)->entry[510]; + addr_t fork1 = (PMLS[2] + 510UL * 512UL + 510UL)->entry[0]; + addr_t fractal1 = (PMLS[2] + 510UL * 512UL + 510UL)->entry[510]; + addr_t dir = currentdir; + + // We want to free the pages, but we are still using them ourselves, + // so lock the page allocation structure until we are done. + Page::Lock(); + + // In case any pages wasn't cleaned at this point. + // TODO: Page::Put calls may internally Page::Get and then reusing pages we are not done with just yet + RecursiveFreeUserspacePages(TOPPMLLEVEL, 0); + + // Switch to the address space from when the world was originally + // created. It should contain the kernel, the whole kernel, and + // nothing but the kernel. + PML* const BOOTPML4 = (PML* const) 0x21000UL; + if ( !fallback ) + fallback = (addr_t) BOOTPML4; + + if ( func ) + func(fallback, user); + else + SwitchAddressSpace(fallback); + + // Ok, now we got marked everything left behind as unused, we can + // now safely let another thread use the pages. + Page::Unlock(); + + // These are safe to free since we switched address space. + Page::Put(fractal3 & PML_ADDRESS); + Page::Put(fractal2 & PML_ADDRESS); + Page::Put(fractal1 & PML_ADDRESS); + Page::Put(fork2 & PML_ADDRESS); + Page::Put(fork1 & PML_ADDRESS); + Page::Put(dir & PML_ADDRESS); +} + +const size_t KERNEL_STACK_SIZE = 256UL * 1024UL; +const addr_t KERNEL_STACK_END = 0xFFFF800000001000UL; +const addr_t KERNEL_STACK_START = KERNEL_STACK_END + KERNEL_STACK_SIZE; + +const addr_t VIRTUAL_AREA_LOWER = KERNEL_STACK_START; +const addr_t VIRTUAL_AREA_UPPER = 0xFFFFFE8000000000UL; + +void GetKernelVirtualArea(addr_t* from, size_t* size) +{ + *from = KERNEL_STACK_END; + *size = VIRTUAL_AREA_UPPER - VIRTUAL_AREA_LOWER; +} + +void GetUserVirtualArea(uintptr_t* from, size_t* size) +{ + *from = 0x400000; // 4 MiB. + *size = 0x800000000000 - *from; // 128 TiB - 4 MiB. +} + +addr_t GetKernelStack() +{ + return KERNEL_STACK_START; +} + +size_t GetKernelStackSize() +{ + return KERNEL_STACK_SIZE; +} + +} // namespace Memory +} // namespace Sortix diff --git a/kernel/x64/memorymanagement.h b/kernel/x64/memorymanagement.h index 0555de07..91d0e63e 100644 --- a/kernel/x64/memorymanagement.h +++ b/kernel/x64/memorymanagement.h @@ -1,6 +1,6 @@ /******************************************************************************* - Copyright(C) Jonas 'Sortie' Termansen 2011. + Copyright(C) Jonas 'Sortie' Termansen 2011, 2014. This file is part of Sortix. 
@@ -25,32 +25,35 @@ #ifndef SORTIX_X64_MEMORYMANAGEMENT_H #define SORTIX_X64_MEMORYMANAGEMENT_H -namespace Sortix +namespace Sortix { +namespace Memory { + +const size_t TOPPMLLEVEL = 4; +const size_t ENTRIES = 4096UL / sizeof(addr_t); +const size_t TRANSBITS = 9; + +PML* const PMLS[TOPPMLLEVEL + 1] = { - namespace Memory - { - const size_t TOPPMLLEVEL = 4; - const size_t ENTRIES = 4096UL / sizeof(addr_t); - const size_t TRANSBITS = 9; + (PML* const) 0x0, + (PML* const) 0xFFFFFF8000000000UL, + (PML* const) 0xFFFFFF7FC0000000UL, + (PML* const) 0XFFFFFF7FBFE00000UL, + (PML* const) 0xFFFFFF7FBFDFF000UL, +}; - PML* const PMLS[TOPPMLLEVEL + 1] = - { - (PML* const) 0x0, - (PML* const) 0xFFFFFF8000000000UL, - (PML* const) 0xFFFFFF7FC0000000UL, - (PML* const) 0XFFFFFF7FBFE00000UL, - (PML* const) 0xFFFFFF7FBFDFF000UL, - }; +PML* const FORKPML = (PML* const) 0xFFFFFF0000000000UL; - PML* const FORKPML = (PML* const) 0xFFFFFF0000000000UL; - } +} // namespace Memory +} // namespace Sortix - namespace Page - { - addr_t* const STACK = (addr_t* const) 0xFFFFFE8000000000UL; - const size_t MAXSTACKSIZE = (512UL*1024UL*1024UL*1024UL); - const size_t MAXSTACKLENGTH = MAXSTACKSIZE / sizeof(addr_t); - } -} +namespace Sortix { +namespace Page { + +addr_t* const STACK = (addr_t* const) 0xFFFFFE8000000000UL; +const size_t MAXSTACKSIZE = (512UL*1024UL*1024UL*1024UL); +const size_t MAXSTACKLENGTH = MAXSTACKSIZE / sizeof(addr_t); + +} // namespace Page +} // namespace Sortix #endif diff --git a/kernel/x86-family/memorymanagement.cpp b/kernel/x86-family/memorymanagement.cpp index 76f80f08..04b17cd2 100644 --- a/kernel/x86-family/memorymanagement.cpp +++ b/kernel/x86-family/memorymanagement.cpp @@ -1,6 +1,6 @@ /******************************************************************************* - Copyright(C) Jonas 'Sortie' Termansen 2011, 2012. + Copyright(C) Jonas 'Sortie' Termansen 2011, 2012, 2014. This file is part of Sortix. @@ -38,733 +38,763 @@ #include "memorymanagement.h" #include "msr.h" -namespace Sortix +namespace Sortix { + +extern size_t end; + +} // namespace Sortix + +namespace Sortix { +namespace Page { + +void InitPushRegion(addr_t position, size_t length); +size_t pagesnotonstack; +size_t stackused; +size_t stackreserved; +size_t stacklength; +size_t totalmem; +kthread_mutex_t pagelock; + +} // namespace Page +} // namespace Sortix + +namespace Sortix { +namespace Memory { + +addr_t currentdir = 0; + +void InitCPU(); +void AllocateKernelPMLs(); +int SysMemStat(size_t* memused, size_t* memtotal); +addr_t PAT2PMLFlags[PAT_NUM]; + +void InitCPU(multiboot_info_t* bootinfo) { - extern size_t end; - - namespace Page + const size_t MAXKERNELEND = 0x400000UL; /* 4 MiB */ + addr_t kernelend = Page::AlignUp((addr_t) &end); + if ( MAXKERNELEND < kernelend ) { - void InitPushRegion(addr_t position, size_t length); - size_t pagesnotonstack; - size_t stackused; - size_t stackreserved; - size_t stacklength; - size_t totalmem; - kthread_mutex_t pagelock; + Log::PrintF("Warning: The kernel is too big! It ends at 0x%zx, " + "but the highest ending address supported is 0x%zx. " + "The system may not boot correctly.\n", kernelend, + MAXKERNELEND); } - namespace Memory + Page::stackreserved = 0; + Page::pagesnotonstack = 0; + Page::totalmem = 0; + Page::pagelock = KTHREAD_MUTEX_INITIALIZER; + + if ( !( bootinfo->flags & MULTIBOOT_INFO_MEM_MAP ) ) + Panic("memorymanagement.cpp: The memory map flag was't set in " + "the multiboot structure. 
Are your bootloader multiboot " + "specification compliant?"); + + // If supported, setup the Page Attribute Table feature that allows + // us to control the memory type (caching) of memory more precisely. + if ( MSR::IsPATSupported() ) { - addr_t currentdir = 0; + MSR::InitializePAT(); + for ( addr_t i = 0; i < PAT_NUM; i++ ) + PAT2PMLFlags[i] = EncodePATAsPMLFlag(i); + } + // Otherwise, reroute all requests to the backwards compatible + // scheme. TODO: Not all early 32-bit x86 CPUs supports these + // values, so we need yet another fallback. + else + { + PAT2PMLFlags[PAT_UC] = PML_WRTHROUGH | PML_NOCACHE; + PAT2PMLFlags[PAT_WC] = PML_WRTHROUGH | PML_NOCACHE; // Approx. + PAT2PMLFlags[2] = 0; // No such flag. + PAT2PMLFlags[3] = 0; // No such flag. + PAT2PMLFlags[PAT_WT] = PML_WRTHROUGH; + PAT2PMLFlags[PAT_WP] = PML_WRTHROUGH; // Approx. + PAT2PMLFlags[PAT_WB] = 0; + PAT2PMLFlags[PAT_UCM] = PML_NOCACHE; + } - void InitCPU(); - void AllocateKernelPMLs(); - int SysMemStat(size_t* memused, size_t* memtotal); - addr_t PAT2PMLFlags[PAT_NUM]; + // Initialize CPU-specific things. + InitCPU(); - void InitCPU(multiboot_info_t* bootinfo) + typedef const multiboot_memory_map_t* mmap_t; + + // Loop over every detected memory region. + for ( + mmap_t mmap = (mmap_t) (addr_t) bootinfo->mmap_addr; + (addr_t) mmap < bootinfo->mmap_addr + bootinfo->mmap_length; + mmap = (mmap_t) ((addr_t) mmap + mmap->size + sizeof(mmap->size)) + ) + { + // Check that we can use this kind of RAM. + if ( mmap->type != 1 ) + continue; + + // The kernel's code may split this memory area into multiple pieces. + addr_t base = (addr_t) mmap->addr; + size_t length = Page::AlignDown(mmap->len); + +#if defined(__i386__) + // Figure out if the memory area is addressable (are our pointers big enough?) + if ( 0xFFFFFFFFULL < mmap->addr ) + continue; + if ( 0xFFFFFFFFULL < mmap->addr + mmap->len ) + length = 0x100000000ULL - mmap->addr; +#endif + + // Count the amount of usable RAM (even if reserved for kernel). + Page::totalmem += length; + + // Give all the physical memory to the physical memory allocator + // but make sure not to give it things we already use. + addr_t regionstart = base; + addr_t regionend = base + length; + addr_t processed = regionstart; + while ( processed < regionend ) { - const size_t MAXKERNELEND = 0x400000UL; /* 4 MiB */ - addr_t kernelend = Page::AlignUp((addr_t) &end); - if ( MAXKERNELEND < kernelend ) + addr_t lowest = processed; + addr_t highest = regionend; + + // Don't allocate the kernel. + if ( lowest < kernelend ) { - Log::PrintF("Warning: The kernel is too big! It ends at 0x%zx, " - "but the highest ending address supported is 0x%zx. " - "The system may not boot correctly.\n", kernelend, - MAXKERNELEND); + processed = kernelend; + continue; } - Page::stackreserved = 0; - Page::pagesnotonstack = 0; - Page::totalmem = 0; - Page::pagelock = KTHREAD_MUTEX_INITIALIZER; - - if ( !( bootinfo->flags & MULTIBOOT_INFO_MEM_MAP ) ) + // Don't give any of our modules to the physical page + // allocator, we'll need them. + bool continuing = false; + uint32_t* modules = (uint32_t*) (addr_t) bootinfo->mods_addr; + for ( uint32_t i = 0; i < bootinfo->mods_count; i++ ) { - Panic("memorymanagement.cpp: The memory map flag was't set in " - "the multiboot structure. Are your bootloader multiboot " - "specification compliant?"); - } - - // If supported, setup the Page Attribute Table feature that allows - // us to control the memory type (caching) of memory more precisely. 
- if ( MSR::IsPATSupported() ) - { - MSR::InitializePAT(); - for ( addr_t i = 0; i < PAT_NUM; i++ ) - PAT2PMLFlags[i] = EncodePATAsPMLFlag(i); - } - // Otherwise, reroute all requests to the backwards compatible - // scheme. TODO: Not all early 32-bit x86 CPUs supports these - // values, so we need yet another fallback. - else - { - PAT2PMLFlags[PAT_UC] = PML_WRTHROUGH | PML_NOCACHE; - PAT2PMLFlags[PAT_WC] = PML_WRTHROUGH | PML_NOCACHE; // Approx. - PAT2PMLFlags[2] = 0; // No such flag. - PAT2PMLFlags[3] = 0; // No such flag. - PAT2PMLFlags[PAT_WT] = PML_WRTHROUGH; - PAT2PMLFlags[PAT_WP] = PML_WRTHROUGH; // Approx. - PAT2PMLFlags[PAT_WB] = 0; - PAT2PMLFlags[PAT_UCM] = PML_NOCACHE; - } - - // Initialize CPU-specific things. - InitCPU(); - - typedef const multiboot_memory_map_t* mmap_t; - - // Loop over every detected memory region. - for ( - mmap_t mmap = (mmap_t) (addr_t) bootinfo->mmap_addr; - (addr_t) mmap < bootinfo->mmap_addr + bootinfo->mmap_length; - mmap = (mmap_t) ((addr_t) mmap + mmap->size + sizeof(mmap->size)) - ) - { - // Check that we can use this kind of RAM. - if ( mmap->type != 1 ) { continue; } - - // The kernel's code may split this memory area into multiple pieces. - addr_t base = (addr_t) mmap->addr; - size_t length = Page::AlignDown(mmap->len); - - #if defined(__i386__) - // Figure out if the memory area is addressable (are our pointers big enough?) - if ( 0xFFFFFFFFULL < mmap->addr ) { continue; } - if ( 0xFFFFFFFFULL < mmap->addr + mmap->len ) { length = 0x100000000ULL - mmap->addr; } - #endif - - // Count the amount of usable RAM (even if reserved for kernel). - Page::totalmem += length; - - // Give all the physical memory to the physical memory allocator - // but make sure not to give it things we already use. - addr_t regionstart = base; - addr_t regionend = base + length; - addr_t processed = regionstart; - while ( processed < regionend ) + size_t modsize = (size_t) (modules[2*i+1] - modules[2*i+0]); + addr_t modstart = (addr_t) modules[2*i+0]; + addr_t modend = modstart + modsize; + if ( modstart <= processed && processed < modend ) { - addr_t lowest = processed; - addr_t highest = regionend; - - // Don't allocate the kernel. - if ( lowest < kernelend ) { processed = kernelend; continue; } - - // Don't give any of our modules to the physical page - // allocator, we'll need them. - bool continuing = false; - uint32_t* modules = (uint32_t*) (addr_t) bootinfo->mods_addr; - for ( uint32_t i = 0; i < bootinfo->mods_count; i++ ) - { - size_t modsize = (size_t) (modules[2*i+1] - modules[2*i+0]); - addr_t modstart = (addr_t) modules[2*i+0]; - addr_t modend = modstart + modsize; - if ( modstart <= processed && processed < modend ) - { - processed = modend; - continuing = true; - break; - } - if ( lowest <= modstart && modstart < highest ) - { - highest = modstart; - } - } - - if ( continuing ) { continue; } - - if ( highest <= lowest ) { break; } - - // Now that we have a continious area not used by anything, - // let's forward it to the physical page allocator. - lowest = Page::AlignUp(lowest); - highest = Page::AlignUp(highest); - size_t size = highest - lowest; - Page::InitPushRegion(lowest, size); - processed = highest; + processed = modend; + continuing = true; + break; } + if ( lowest <= modstart && modstart < highest ) + highest = modstart; } - // If the physical allocator couldn't handle the vast amount of - // physical pages, it may decide to drop some. This shouldn't happen - // until the pebibyte era of RAM. 
- if ( 0 < Page::pagesnotonstack ) - { - Log::PrintF("%zu bytes of RAM aren't used due to technical " - "restrictions.\n", Page::pagesnotonstack * 0x1000UL); - } + if ( continuing ) + continue; - Memory::Unmap(0x0); // Remove NULL. + if ( highest <= lowest ) + break; - // Finish allocating the top level PMLs for the kernels use. - AllocateKernelPMLs(); - } - - void Statistics(size_t* amountused, size_t* totalmem) - { - size_t memfree = (Page::stackused - Page::stackreserved) << 12UL; - size_t memused = Page::totalmem - memfree; - if ( amountused ) { *amountused = memused; } - if ( totalmem ) { *totalmem = Page::totalmem; } - } - - // Prepare the non-forkable kernel PMLs such that forking the kernel - // address space will always keep the kernel mapped. - void AllocateKernelPMLs() - { - const addr_t flags = PML_PRESENT | PML_WRITABLE; - - PML* const pml = PMLS[TOPPMLLEVEL]; - - size_t start = ENTRIES / 2; - size_t end = ENTRIES; - - for ( size_t i = start; i < end; i++ ) - { - if ( pml->entry[i] & PML_PRESENT ) { continue; } - - addr_t page = Page::Get(); - if ( !page ) { Panic("out of memory allocating boot PMLs"); } - - pml->entry[i] = page | flags; - - // Invalidate the new PML and reset it to zeroes. - addr_t pmladdr = (addr_t) (PMLS[TOPPMLLEVEL-1] + i); - InvalidatePage(pmladdr); - memset((void*) pmladdr, 0, sizeof(PML)); - } + // Now that we have a continious area not used by anything, + // let's forward it to the physical page allocator. + lowest = Page::AlignUp(lowest); + highest = Page::AlignUp(highest); + size_t size = highest - lowest; + Page::InitPushRegion(lowest, size); + processed = highest; } } - namespace Page + // If the physical allocator couldn't handle the vast amount of + // physical pages, it may decide to drop some. This shouldn't happen + // until the pebibyte era of RAM. + if ( 0 < Page::pagesnotonstack ) + Log::PrintF("%zu bytes of RAM aren't used due to technical " + "restrictions.\n", Page::pagesnotonstack * 0x1000UL); + + Memory::Unmap(0x0); // Remove NULL. + + // Finish allocating the top level PMLs for the kernels use. + AllocateKernelPMLs(); +} + +void Statistics(size_t* amountused, size_t* totalmem) +{ + size_t memfree = (Page::stackused - Page::stackreserved) << 12UL; + size_t memused = Page::totalmem - memfree; + if ( amountused ) + *amountused = memused; + if ( totalmem ) + *totalmem = Page::totalmem; +} + +// Prepare the non-forkable kernel PMLs such that forking the kernel +// address space will always keep the kernel mapped. +void AllocateKernelPMLs() +{ + const addr_t flags = PML_PRESENT | PML_WRITABLE; + + PML* const pml = PMLS[TOPPMLLEVEL]; + + size_t start = ENTRIES / 2; + size_t end = ENTRIES; + + for ( size_t i = start; i < end; i++ ) { - void ExtendStack() - { - // This call will always succeed, if it didn't, then the stack - // wouldn't be full, and thus this function won't be called. - addr_t page = GetUnlocked(); + if ( pml->entry[i] & PML_PRESENT ) + continue; - // This call will also succeed, since there are plenty of physical - // pages available and it might need some. - addr_t virt = (addr_t) (STACK + stacklength); - if ( !Memory::Map(page, virt, PROT_KREAD | PROT_KWRITE) ) - { - Panic("Unable to extend page stack, which should have worked"); - } + addr_t page = Page::Get(); + if ( !page ) + Panic("out of memory allocating boot PMLs"); - // TODO: This may not be needed during the boot process! 
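// A minimal usage sketch of the physical page allocator implemented in
// this file, assuming the caller may block on the page lock:
// Page::Get() pops a page-aligned physical address off the free-page
// stack (returning 0 with errno == ENOMEM when it is empty) and
// Page::Put() pushes it back. The helper and its callback are
// hypothetical.
static bool SketchWithScratchPage(bool (*work)(addr_t physical_page))
{
	addr_t page = Sortix::Page::Get();
	if ( !page )
		return false; // Out of physical memory.
	bool result = work(page);
	Sortix::Page::Put(page);
	return result;
}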
- //Memory::InvalidatePage((addr_t) (STACK + stacklength)); + pml->entry[i] = page | flags; - stacklength += 4096UL / sizeof(addr_t); - } - - void InitPushRegion(addr_t position, size_t length) - { - // Align our entries on page boundaries. - addr_t newposition = Page::AlignUp(position); - length = Page::AlignDown((position + length) - newposition); - position = newposition; - - while ( length ) - { - if ( unlikely(stackused == stacklength) ) - { - if ( stackused == MAXSTACKLENGTH ) - { - pagesnotonstack += length / 4096UL; - return; - } - - ExtendStack(); - } - - addr_t* stackentry = &(STACK[stackused++]); - *stackentry = position; - - length -= 4096UL; - position += 4096UL; - } - } - - bool ReserveUnlocked(size_t* counter, size_t least, size_t ideal) - { - assert(least < ideal); - size_t available = stackused - stackreserved; - if ( least < available ) { errno = ENOMEM; return false; } - if ( available < ideal ) { ideal = available; } - stackreserved += ideal; - *counter += ideal; - return true; - } - - bool Reserve(size_t* counter, size_t least, size_t ideal) - { - ScopedLock lock(&pagelock); - return ReserveUnlocked(counter, least, ideal); - } - - bool ReserveUnlocked(size_t* counter, size_t amount) - { - return ReserveUnlocked(counter, amount, amount); - } - - bool Reserve(size_t* counter, size_t amount) - { - ScopedLock lock(&pagelock); - return ReserveUnlocked(counter, amount); - } - - addr_t GetReservedUnlocked(size_t* counter) - { - if ( !*counter ) { return 0; } - assert(stackused); // After all, we did _reserve_ the memory. - addr_t result = STACK[--stackused]; - assert(result == AlignDown(result)); - stackreserved--; - (*counter)--; - return result; - } - - addr_t GetReserved(size_t* counter) - { - ScopedLock lock(&pagelock); - return GetReservedUnlocked(counter); - } - - addr_t GetUnlocked() - { - assert(stackreserved <= stackused); - if ( unlikely(stackreserved == stackused) ) - { - errno = ENOMEM; - return 0; - } - addr_t result = STACK[--stackused]; - assert(result == AlignDown(result)); - return result; - } - - addr_t Get() - { - ScopedLock lock(&pagelock); - return GetUnlocked(); - } - - void PutUnlocked(addr_t page) - { - assert(page == AlignDown(page)); - if ( unlikely(stackused == stacklength) ) - { - if ( stackused == MAXSTACKLENGTH ) - { - pagesnotonstack++; - return; - } - ExtendStack(); - } - STACK[stackused++] = page; - } - - void Put(addr_t page) - { - ScopedLock lock(&pagelock); - PutUnlocked(page); - } - - void Lock() - { - kthread_mutex_lock(&pagelock); - } - - void Unlock() - { - kthread_mutex_unlock(&pagelock); - } - } - - namespace Memory - { - addr_t ProtectionToPMLFlags(int prot) - { - addr_t result = 0; - if ( prot & PROT_EXEC ) { result |= PML_USERSPACE; } - if ( prot & PROT_READ ) { result |= PML_USERSPACE; } - if ( prot & PROT_WRITE ) { result |= PML_USERSPACE | PML_WRITABLE; } - if ( prot & PROT_KEXEC ) { result |= 0; } - if ( prot & PROT_KREAD ) { result |= 0; } - if ( prot & PROT_KWRITE ) { result |= 0; } - if ( prot & PROT_FORK ) { result |= PML_FORK; } - return result; - } - - int PMLFlagsToProtection(addr_t flags) - { - int prot = PROT_KREAD | PROT_KWRITE | PROT_KEXEC; - bool user = flags & PML_USERSPACE; - bool write = flags & PML_WRITABLE; - if ( user ) { prot |= PROT_EXEC | PROT_READ; } - if ( user && write ) { prot |= PROT_WRITE; } - return prot; - } - - int ProvidedProtection(int prot) - { - addr_t flags = ProtectionToPMLFlags(prot); - return PMLFlagsToProtection(flags); - } - - bool LookUp(addr_t mapto, addr_t* physical, int* protection) - 
{ - // Translate the virtual address into PML indexes. - const size_t MASK = (1<> (12+(i-1)*TRANSBITS)) & MASK; - } - - int prot = PROT_USER | PROT_KERNEL | PROT_FORK; - - // For each PML level, make sure it exists. - size_t offset = 0; - for ( size_t i = TOPPMLLEVEL; i > 1; i-- ) - { - size_t childid = pmlchildid[i]; - PML* pml = PMLS[i] + offset; - - addr_t entry = pml->entry[childid]; - if ( !(entry & PML_PRESENT) ) { return false; } - int entryflags = entry & PML_ADDRESS; - int entryprot = PMLFlagsToProtection(entryflags); - prot &= entryprot; - - // Find the index of the next PML in the fractal mapped memory. - offset = offset * ENTRIES + childid; - } - - addr_t entry = (PMLS[1] + offset)->entry[pmlchildid[1]]; - if ( !(entry & PML_PRESENT) ) { return false; } - - int entryflags = entry & PML_ADDRESS; - int entryprot = PMLFlagsToProtection(entryflags); - prot &= entryprot; - addr_t phys = entry & PML_ADDRESS; - - if ( physical ) { *physical = phys; } - if ( protection ) { *protection = prot; } - - return true; - } - - void InvalidatePage(addr_t /*addr*/) - { - // TODO: Actually just call the instruction. - Flush(); - } - - // Flushes the Translation Lookaside Buffer (TLB). - void Flush() - { - asm volatile("mov %0, %%cr3":: "r"(currentdir)); - } - - addr_t GetAddressSpace() - { - return currentdir; - } - - addr_t SwitchAddressSpace(addr_t addrspace) - { - // Have fun debugging this. - if ( currentdir != Page::AlignDown(currentdir) ) - { - PanicF("The variable containing the current address space " - "contains garbage all of sudden: it isn't page-aligned. " - "It contains the value 0x%zx.", currentdir); - } - - // Don't switch if we are already there. - if ( addrspace == currentdir ) { return currentdir; } - - if ( addrspace & 0xFFFUL ) { PanicF("addrspace 0x%zx was not page-aligned!", addrspace); } - - addr_t previous = currentdir; - - // Switch and flush the TLB. - asm volatile("mov %0, %%cr3":: "r"(addrspace)); - - currentdir = addrspace; - - return previous; - } - - bool MapRange(addr_t where, size_t bytes, int protection) - { - for ( addr_t page = where; page < where + bytes; page += 4096UL ) - { - addr_t physicalpage = Page::Get(); - if ( physicalpage == 0 ) - { - while ( where < page ) - { - page -= 4096UL; - physicalpage = Unmap(page); - Page::Put(physicalpage); - } - return false; - } - - Map(physicalpage, page, protection); - } - - return true; - } - - bool UnmapRange(addr_t where, size_t bytes) - { - for ( addr_t page = where; page < where + bytes; page += 4096UL ) - { - addr_t physicalpage = Unmap(page); - Page::Put(physicalpage); - } - return true; - } - - static bool MapInternal(addr_t physical, addr_t mapto, int prot, addr_t extraflags = 0) - { - addr_t flags = ProtectionToPMLFlags(prot) | PML_PRESENT; - - // Translate the virtual address into PML indexes. - const size_t MASK = (1<> (12+(i-1)*TRANSBITS)) & MASK; - } - - // For each PML level, make sure it exists. - size_t offset = 0; - for ( size_t i = TOPPMLLEVEL; i > 1; i-- ) - { - size_t childid = pmlchildid[i]; - PML* pml = PMLS[i] + offset; - - addr_t& entry = pml->entry[childid]; - - // Find the index of the next PML in the fractal mapped memory. - size_t childoffset = offset * ENTRIES + childid; - - if ( !(entry & PML_PRESENT) ) - { - // TODO: Possible memory leak when page allocation fails. - addr_t page = Page::Get(); - - if ( !page ) { return false; } - addr_t pmlflags = PML_PRESENT | PML_WRITABLE | PML_USERSPACE - | PML_FORK; - entry = page | pmlflags; - - // Invalidate the new PML and reset it to zeroes. 
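// Worked example of the index arithmetic shared by LookUp, MapInternal
// and Unmap in x86-family/memorymanagement.cpp, assuming TRANSBITS == 9
// and TOPPMLLEVEL == 4 as defined for x86_64: each level extracts nine
// bits of the virtual address above the 12-bit page offset,
//
//   pmlchildid[i] = (virt >> (12 + (i-1) * TRANSBITS)) & ((1 << TRANSBITS) - 1)
//
// so KERNEL_STACK_END == 0xFFFF800000001000 decomposes into
// pmlchildid[4] = 256, pmlchildid[3] = 0, pmlchildid[2] = 0 and
// pmlchildid[1] = 1, with the accumulated offset selecting the
// matching PML through the fractal windows at each level.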
- addr_t pmladdr = (addr_t) (PMLS[i-1] + childoffset); - InvalidatePage(pmladdr); - memset((void*) pmladdr, 0, sizeof(PML)); - } - - offset = childoffset; - } - - // Actually map the physical page to the virtual page. - const addr_t entry = physical | flags | extraflags; - (PMLS[1] + offset)->entry[pmlchildid[1]] = entry; - return true; - } - - bool Map(addr_t physical, addr_t mapto, int prot) - { - return MapInternal(physical, mapto, prot); - } - - void PageProtect(addr_t mapto, int protection) - { - addr_t phys; - if ( !LookUp(mapto, &phys, NULL) ) - return; - Map(phys, mapto, protection); - } - - void PageProtectAdd(addr_t mapto, int protection) - { - addr_t phys; - int prot; - if ( !LookUp(mapto, &phys, &prot) ) - return; - prot |= protection; - Map(phys, mapto, prot); - } - - void PageProtectSub(addr_t mapto, int protection) - { - addr_t phys; - int prot; - if ( !LookUp(mapto, &phys, &prot) ) - return; - prot &= ~protection; - Map(phys, mapto, prot); - } - - addr_t Unmap(addr_t mapto) - { - // Translate the virtual address into PML indexes. - const size_t MASK = (1<> (12+(i-1)*TRANSBITS)) & MASK; - } - - // For each PML level, make sure it exists. - size_t offset = 0; - for ( size_t i = TOPPMLLEVEL; i > 1; i-- ) - { - size_t childid = pmlchildid[i]; - PML* pml = PMLS[i] + offset; - - addr_t& entry = pml->entry[childid]; - - if ( !(entry & PML_PRESENT) ) - { - PanicF("Attempted to unmap virtual page %p, but the virtual" - " page was wasn't mapped. This is a bug in the code " - "code calling this function", mapto); - } - - // Find the index of the next PML in the fractal mapped memory. - offset = offset * ENTRIES + childid; - } - - addr_t& entry = (PMLS[1] + offset)->entry[pmlchildid[1]]; - addr_t result = entry & PML_ADDRESS; - entry = 0; - - // TODO: If all the entries in PML[N] are not-present, then who - // unmaps its entry from PML[N-1]? - - return result; - } - - bool MapPAT(addr_t physical, addr_t mapto, int prot, addr_t mtype) - { - addr_t extraflags = PAT2PMLFlags[mtype]; - return MapInternal(physical, mapto, prot, extraflags); - } - - void ForkCleanup(size_t i, size_t level) - { - PML* destpml = FORKPML + level; - if ( !i ) { return; } - for ( size_t n = 0; n < i-1; n++ ) - { - addr_t entry = destpml->entry[i]; - if ( !(entry & PML_FORK ) ) { continue; } - addr_t phys = entry & PML_ADDRESS; - if ( 1 < level ) - { - addr_t destaddr = (addr_t) (FORKPML + level-1); - Map(phys, destaddr, PROT_KREAD | PROT_KWRITE); - InvalidatePage(destaddr); - ForkCleanup(ENTRIES+1UL, level-1); - } - Page::Put(phys); - } - } - - // TODO: Copying every frame is endlessly useless in many uses. It'd be - // nice to upgrade this to a copy-on-write algorithm. - bool Fork(size_t level, size_t pmloffset) - { - PML* destpml = FORKPML + level; - for ( size_t i = 0; i < ENTRIES; i++ ) - { - addr_t entry = (PMLS[level] + pmloffset)->entry[i]; - - // Link the entry if it isn't supposed to be forked. - if ( !(entry & PML_FORK ) ) - { - destpml->entry[i] = entry; - continue; - } - - addr_t phys = Page::Get(); - if ( unlikely(!phys) ) { ForkCleanup(i, level); return false; } - - addr_t flags = entry & PML_FLAGS; - destpml->entry[i] = phys | flags; - - // Map the destination page. 
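// The InvalidatePage() TODO in this file (it currently falls back to a
// full TLB flush through CR3) could be satisfied with the dedicated
// instruction; a minimal sketch, assuming the usual GCC inline
// assembly constraints:
static inline void SketchInvalidatePage(addr_t addr)
{
	asm volatile("invlpg (%0)" :: "r"(addr) : "memory");
}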
- addr_t destaddr = (addr_t) (FORKPML + level-1); - Map(phys, destaddr, PROT_KREAD | PROT_KWRITE); - InvalidatePage(destaddr); - - size_t offset = pmloffset * ENTRIES + i; - - if ( 1 < level ) - { - if ( !Fork(level-1, offset) ) - { - Page::Put(phys); - ForkCleanup(i, level); - return false; - } - continue; - } - - // Determine the source page's address. - const void* src = (const void*) (offset * 4096UL); - - // Determine the destination page's address. - void* dest = (void*) (FORKPML + level - 1); - - memcpy(dest, src, 4096UL); - } - - return true; - } - - bool Fork(addr_t dir, size_t level, size_t pmloffset) - { - PML* destpml = FORKPML + level; - - // This call always succeeds. - Map(dir, (addr_t) destpml, PROT_KREAD | PROT_KWRITE); - InvalidatePage((addr_t) destpml); - - return Fork(level, pmloffset); - } - - // Create an exact copy of the current address space. - addr_t Fork() - { - addr_t dir = Page::Get(); - if ( dir == 0 ) { return 0; } - if ( !Fork(dir, TOPPMLLEVEL, 0) ) { Page::Put(dir); return 0; } - - // Now, the new top pml needs to have its fractal memory fixed. - const addr_t flags = PML_PRESENT | PML_WRITABLE; - addr_t mapto; - addr_t childaddr; - - (FORKPML + TOPPMLLEVEL)->entry[ENTRIES-1] = dir | flags; - childaddr = (FORKPML + TOPPMLLEVEL)->entry[ENTRIES-2] & PML_ADDRESS; - - for ( size_t i = TOPPMLLEVEL-1; i > 0; i-- ) - { - mapto = (addr_t) (FORKPML + i); - Map(childaddr, mapto, PROT_KREAD | PROT_KWRITE); - InvalidatePage(mapto); - (FORKPML + i)->entry[ENTRIES-1] = dir | flags; - childaddr = (FORKPML + i)->entry[ENTRIES-2] & PML_ADDRESS; - } - return dir; - } + // Invalidate the new PML and reset it to zeroes. + addr_t pmladdr = (addr_t) (PMLS[TOPPMLLEVEL-1] + i); + InvalidatePage(pmladdr); + memset((void*) pmladdr, 0, sizeof(PML)); } } + +} // namespace Memory +} // namespace Sortix + +namespace Sortix { +namespace Page { + +void ExtendStack() +{ + // This call will always succeed, if it didn't, then the stack + // wouldn't be full, and thus this function won't be called. + addr_t page = GetUnlocked(); + + // This call will also succeed, since there are plenty of physical + // pages available and it might need some. + addr_t virt = (addr_t) (STACK + stacklength); + if ( !Memory::Map(page, virt, PROT_KREAD | PROT_KWRITE) ) + Panic("Unable to extend page stack, which should have worked"); + + // TODO: This may not be needed during the boot process! + //Memory::InvalidatePage((addr_t) (STACK + stacklength)); + + stacklength += 4096UL / sizeof(addr_t); +} + +void InitPushRegion(addr_t position, size_t length) +{ + // Align our entries on page boundaries. 
+ addr_t newposition = Page::AlignUp(position); + length = Page::AlignDown((position + length) - newposition); + position = newposition; + + while ( length ) + { + if ( unlikely(stackused == stacklength) ) + { + if ( stackused == MAXSTACKLENGTH ) + { + pagesnotonstack += length / 4096UL; + return; + } + + ExtendStack(); + } + + addr_t* stackentry = &(STACK[stackused++]); + *stackentry = position; + + length -= 4096UL; + position += 4096UL; + } +} + +bool ReserveUnlocked(size_t* counter, size_t least, size_t ideal) +{ + assert(least < ideal); + size_t available = stackused - stackreserved; + if ( least < available ) + return errno = ENOMEM, false; + if ( available < ideal ) + ideal = available; + stackreserved += ideal; + *counter += ideal; + return true; +} + +bool Reserve(size_t* counter, size_t least, size_t ideal) +{ + ScopedLock lock(&pagelock); + return ReserveUnlocked(counter, least, ideal); +} + +bool ReserveUnlocked(size_t* counter, size_t amount) +{ + return ReserveUnlocked(counter, amount, amount); +} + +bool Reserve(size_t* counter, size_t amount) +{ + ScopedLock lock(&pagelock); + return ReserveUnlocked(counter, amount); +} + +addr_t GetReservedUnlocked(size_t* counter) +{ + if ( !*counter ) + return 0; + assert(stackused); // After all, we did _reserve_ the memory. + addr_t result = STACK[--stackused]; + assert(result == AlignDown(result)); + stackreserved--; + (*counter)--; + return result; +} + +addr_t GetReserved(size_t* counter) +{ + ScopedLock lock(&pagelock); + return GetReservedUnlocked(counter); +} + +addr_t GetUnlocked() +{ + assert(stackreserved <= stackused); + if ( unlikely(stackreserved == stackused) ) + return errno = ENOMEM, 0; + addr_t result = STACK[--stackused]; + assert(result == AlignDown(result)); + return result; +} + +addr_t Get() +{ + ScopedLock lock(&pagelock); + return GetUnlocked(); +} + +void PutUnlocked(addr_t page) +{ + assert(page == AlignDown(page)); + if ( unlikely(stackused == stacklength) ) + { + if ( stackused == MAXSTACKLENGTH ) + { + pagesnotonstack++; + return; + } + ExtendStack(); + } + STACK[stackused++] = page; +} + +void Put(addr_t page) +{ + ScopedLock lock(&pagelock); + PutUnlocked(page); +} + +void Lock() +{ + kthread_mutex_lock(&pagelock); +} + +void Unlock() +{ + kthread_mutex_unlock(&pagelock); +} + +} // namespace Page +} // namespace Sortix + +namespace Sortix { +namespace Memory { + +addr_t ProtectionToPMLFlags(int prot) +{ + addr_t result = 0; + if ( prot & PROT_EXEC ) { result |= PML_USERSPACE; } + if ( prot & PROT_READ ) { result |= PML_USERSPACE; } + if ( prot & PROT_WRITE ) { result |= PML_USERSPACE | PML_WRITABLE; } + if ( prot & PROT_KEXEC ) { result |= 0; } + if ( prot & PROT_KREAD ) { result |= 0; } + if ( prot & PROT_KWRITE ) { result |= 0; } + if ( prot & PROT_FORK ) { result |= PML_FORK; } + return result; +} + +int PMLFlagsToProtection(addr_t flags) +{ + int prot = PROT_KREAD | PROT_KWRITE | PROT_KEXEC; + bool user = flags & PML_USERSPACE; + bool write = flags & PML_WRITABLE; + if ( user ) + prot |= PROT_EXEC | PROT_READ; + if ( user && write ) + prot |= PROT_WRITE; + return prot; +} + +int ProvidedProtection(int prot) +{ + addr_t flags = ProtectionToPMLFlags(prot); + return PMLFlagsToProtection(flags); +} + +bool LookUp(addr_t mapto, addr_t* physical, int* protection) +{ + // Translate the virtual address into PML indexes. + const size_t MASK = (1<> (12 + (i-1) * TRANSBITS) & MASK; + + int prot = PROT_USER | PROT_KERNEL | PROT_FORK; + + // For each PML level, make sure it exists. 
+ size_t offset = 0; + for ( size_t i = TOPPMLLEVEL; i > 1; i-- ) + { + size_t childid = pmlchildid[i]; + PML* pml = PMLS[i] + offset; + + addr_t entry = pml->entry[childid]; + if ( !(entry & PML_PRESENT) ) + return false; + int entryflags = entry & PML_ADDRESS; + int entryprot = PMLFlagsToProtection(entryflags); + prot &= entryprot; + + // Find the index of the next PML in the fractal mapped memory. + offset = offset * ENTRIES + childid; + } + + addr_t entry = (PMLS[1] + offset)->entry[pmlchildid[1]]; + if ( !(entry & PML_PRESENT) ) + return false; + + int entryflags = entry & PML_ADDRESS; + int entryprot = PMLFlagsToProtection(entryflags); + prot &= entryprot; + addr_t phys = entry & PML_ADDRESS; + + if ( physical ) + *physical = phys; + if ( protection ) + *protection = prot; + + return true; +} + +void InvalidatePage(addr_t /*addr*/) +{ + // TODO: Actually just call the instruction. + Flush(); +} + +// Flushes the Translation Lookaside Buffer (TLB). +void Flush() +{ + asm volatile("mov %0, %%cr3":: "r"(currentdir)); +} + +addr_t GetAddressSpace() +{ + return currentdir; +} + +addr_t SwitchAddressSpace(addr_t addrspace) +{ + // Have fun debugging this. + if ( currentdir != Page::AlignDown(currentdir) ) + PanicF("The variable containing the current address space " + "contains garbage all of sudden: it isn't page-aligned. " + "It contains the value 0x%zx.", currentdir); + + // Don't switch if we are already there. + if ( addrspace == currentdir ) + return currentdir; + + if ( addrspace & 0xFFFUL ) + PanicF("addrspace 0x%zx was not page-aligned!", addrspace); + + addr_t previous = currentdir; + + // Switch and flush the TLB. + asm volatile("mov %0, %%cr3":: "r"(addrspace)); + + currentdir = addrspace; + + return previous; +} + +bool MapRange(addr_t where, size_t bytes, int protection) +{ + for ( addr_t page = where; page < where + bytes; page += 4096UL ) + { + addr_t physicalpage = Page::Get(); + if ( physicalpage == 0 ) + { + while ( where < page ) + { + page -= 4096UL; + physicalpage = Unmap(page); + Page::Put(physicalpage); + } + return false; + } + + Map(physicalpage, page, protection); + } + + return true; +} + +bool UnmapRange(addr_t where, size_t bytes) +{ + for ( addr_t page = where; page < where + bytes; page += 4096UL ) + { + addr_t physicalpage = Unmap(page); + Page::Put(physicalpage); + } + return true; +} + +static bool MapInternal(addr_t physical, addr_t mapto, int prot, addr_t extraflags = 0) +{ + addr_t flags = ProtectionToPMLFlags(prot) | PML_PRESENT; + + // Translate the virtual address into PML indexes. + const size_t MASK = (1<> (12 + (i-1) * TRANSBITS) & MASK; + + // For each PML level, make sure it exists. + size_t offset = 0; + for ( size_t i = TOPPMLLEVEL; i > 1; i-- ) + { + size_t childid = pmlchildid[i]; + PML* pml = PMLS[i] + offset; + + addr_t& entry = pml->entry[childid]; + + // Find the index of the next PML in the fractal mapped memory. + size_t childoffset = offset * ENTRIES + childid; + + if ( !(entry & PML_PRESENT) ) + { + // TODO: Possible memory leak when page allocation fails. + addr_t page = Page::Get(); + + if ( !page ) + return false; + addr_t pmlflags = PML_PRESENT | PML_WRITABLE | PML_USERSPACE + | PML_FORK; + entry = page | pmlflags; + + // Invalidate the new PML and reset it to zeroes. + addr_t pmladdr = (addr_t) (PMLS[i-1] + childoffset); + InvalidatePage(pmladdr); + memset((void*) pmladdr, 0, sizeof(PML)); + } + + offset = childoffset; + } + + // Actually map the physical page to the virtual page. 
+ const addr_t entry = physical | flags | extraflags; + (PMLS[1] + offset)->entry[pmlchildid[1]] = entry; + return true; +} + +bool Map(addr_t physical, addr_t mapto, int prot) +{ + return MapInternal(physical, mapto, prot); +} + +void PageProtect(addr_t mapto, int protection) +{ + addr_t phys; + if ( !LookUp(mapto, &phys, NULL) ) + return; + Map(phys, mapto, protection); +} + +void PageProtectAdd(addr_t mapto, int protection) +{ + addr_t phys; + int prot; + if ( !LookUp(mapto, &phys, &prot) ) + return; + prot |= protection; + Map(phys, mapto, prot); +} + +void PageProtectSub(addr_t mapto, int protection) +{ + addr_t phys; + int prot; + if ( !LookUp(mapto, &phys, &prot) ) + return; + prot &= ~protection; + Map(phys, mapto, prot); +} + +addr_t Unmap(addr_t mapto) +{ + // Translate the virtual address into PML indexes. + const size_t MASK = (1<> (12 + (i-1) * TRANSBITS) & MASK; + } + + // For each PML level, make sure it exists. + size_t offset = 0; + for ( size_t i = TOPPMLLEVEL; i > 1; i-- ) + { + size_t childid = pmlchildid[i]; + PML* pml = PMLS[i] + offset; + + addr_t& entry = pml->entry[childid]; + + if ( !(entry & PML_PRESENT) ) + PanicF("Attempted to unmap virtual page %p, but the virtual" + " page was wasn't mapped. This is a bug in the code " + "code calling this function", mapto); + + // Find the index of the next PML in the fractal mapped memory. + offset = offset * ENTRIES + childid; + } + + addr_t& entry = (PMLS[1] + offset)->entry[pmlchildid[1]]; + addr_t result = entry & PML_ADDRESS; + entry = 0; + + // TODO: If all the entries in PML[N] are not-present, then who + // unmaps its entry from PML[N-1]? + + return result; +} + +bool MapPAT(addr_t physical, addr_t mapto, int prot, addr_t mtype) +{ + addr_t extraflags = PAT2PMLFlags[mtype]; + return MapInternal(physical, mapto, prot, extraflags); +} + +void ForkCleanup(size_t i, size_t level) +{ + PML* destpml = FORKPML + level; + if ( !i ) + return; + for ( size_t n = 0; n < i-1; n++ ) + { + addr_t entry = destpml->entry[i]; + if ( !(entry & PML_FORK ) ) + continue; + addr_t phys = entry & PML_ADDRESS; + if ( 1 < level ) + { + addr_t destaddr = (addr_t) (FORKPML + level-1); + Map(phys, destaddr, PROT_KREAD | PROT_KWRITE); + InvalidatePage(destaddr); + ForkCleanup(ENTRIES+1UL, level-1); + } + Page::Put(phys); + } +} + +// TODO: Copying every frame is endlessly useless in many uses. It'd be +// nice to upgrade this to a copy-on-write algorithm. +bool Fork(size_t level, size_t pmloffset) +{ + PML* destpml = FORKPML + level; + for ( size_t i = 0; i < ENTRIES; i++ ) + { + addr_t entry = (PMLS[level] + pmloffset)->entry[i]; + + // Link the entry if it isn't supposed to be forked. + if ( !(entry & PML_FORK ) ) + { + destpml->entry[i] = entry; + continue; + } + + addr_t phys = Page::Get(); + if ( unlikely(!phys) ) + { + ForkCleanup(i, level); + return false; + } + + addr_t flags = entry & PML_FLAGS; + destpml->entry[i] = phys | flags; + + // Map the destination page. + addr_t destaddr = (addr_t) (FORKPML + level-1); + Map(phys, destaddr, PROT_KREAD | PROT_KWRITE); + InvalidatePage(destaddr); + + size_t offset = pmloffset * ENTRIES + i; + + if ( 1 < level ) + { + if ( !Fork(level-1, offset) ) + { + Page::Put(phys); + ForkCleanup(i, level); + return false; + } + continue; + } + + // Determine the source page's address. + const void* src = (const void*) (offset * 4096UL); + + // Determine the destination page's address. 
+ void* dest = (void*) (FORKPML + level - 1); + + memcpy(dest, src, 4096UL); + } + + return true; +} + +bool Fork(addr_t dir, size_t level, size_t pmloffset) +{ + PML* destpml = FORKPML + level; + + // This call always succeeds. + Map(dir, (addr_t) destpml, PROT_KREAD | PROT_KWRITE); + InvalidatePage((addr_t) destpml); + + return Fork(level, pmloffset); +} + +// Create an exact copy of the current address space. +addr_t Fork() +{ + addr_t dir = Page::Get(); + if ( dir == 0 ) + return 0; + if ( !Fork(dir, TOPPMLLEVEL, 0) ) + { + Page::Put(dir); + return 0; + } + + // Now, the new top pml needs to have its fractal memory fixed. + const addr_t flags = PML_PRESENT | PML_WRITABLE; + addr_t mapto; + addr_t childaddr; + + (FORKPML + TOPPMLLEVEL)->entry[ENTRIES-1] = dir | flags; + childaddr = (FORKPML + TOPPMLLEVEL)->entry[ENTRIES-2] & PML_ADDRESS; + + for ( size_t i = TOPPMLLEVEL-1; i > 0; i-- ) + { + mapto = (addr_t) (FORKPML + i); + Map(childaddr, mapto, PROT_KREAD | PROT_KWRITE); + InvalidatePage(mapto); + (FORKPML + i)->entry[ENTRIES-1] = dir | flags; + childaddr = (FORKPML + i)->entry[ENTRIES-2] & PML_ADDRESS; + } + return dir; +} + +} // namespace Memory +} // namespace Sortix diff --git a/kernel/x86-family/memorymanagement.h b/kernel/x86-family/memorymanagement.h index 07d6c07f..503df818 100644 --- a/kernel/x86-family/memorymanagement.h +++ b/kernel/x86-family/memorymanagement.h @@ -1,6 +1,6 @@ /******************************************************************************* - Copyright(C) Jonas 'Sortie' Termansen 2011, 2012. + Copyright(C) Jonas 'Sortie' Termansen 2011, 2012, 2014. This file is part of Sortix. @@ -25,73 +25,81 @@ #ifndef SORTIX_X86_FAMILY_MEMORYMANAGEMENT_H #define SORTIX_X86_FAMILY_MEMORYMANAGEMENT_H -namespace Sortix -{ - struct PML - { - addr_t entry[4096 / sizeof(addr_t)]; - }; +namespace Sortix { - namespace Memory - { - const addr_t PML_PRESENT = (1<<0); - const addr_t PML_WRITABLE = (1<<1); - const addr_t PML_USERSPACE = (1<<2); - const addr_t PML_WRTHROUGH = (1<<3); - const addr_t PML_NOCACHE = (1<<4); - const addr_t PML_PAT = (1<<7); - const addr_t PML_AVAILABLE1 = (1<<9); - const addr_t PML_AVAILABLE2 = (1<<10); - const addr_t PML_AVAILABLE3 = (1<<11); - const addr_t PML_FORK = PML_AVAILABLE1; - const addr_t PML_FLAGS = (0xFFFUL); // Bits used for the flags. - const addr_t PML_ADDRESS = (~0xFFFUL); // Bits used for the address. - const addr_t PAT_UC = 0x00; // Uncacheable - const addr_t PAT_WC = 0x01; // Write-Combine - const addr_t PAT_WT = 0x04; // Writethrough - const addr_t PAT_WP = 0x05; // Write-Protect - const addr_t PAT_WB = 0x06; // Writeback - const addr_t PAT_UCM = 0x07; // Uncacheable, overruled by MTRR. - const addr_t PAT_NUM = 0x08; - // Desired PAT-Register PA-Field Indexing (different from BIOS defaults) - const addr_t PA[PAT_NUM] = - { - PAT_WB, - PAT_WT, - PAT_UCM, - PAT_UC, - PAT_WC, - PAT_WP, - 0, - 0, - }; - // Inverse function of the above. 
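// Worked example of the PAT tables and EncodePATAsPMLFlag in this
// header, assuming the PA[] layout is what MSR::InitializePAT programs
// into the PAT MSR: a request for write-combining memory
// (PAT_WC == 0x01) looks up PAINV[0x01] == 4, and 4 encodes as
// PWT = 0, PCD = 0, PAT = 1, so EncodePATAsPMLFlag(PAT_WC) == PML_PAT,
// which selects PAT MSR entry 4, which PA[4] programs to
// Write-Combine. On CPUs without PAT support, InitCPU in
// x86-family/memorymanagement.cpp approximates the same request with
// PML_WRTHROUGH | PML_NOCACHE instead.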
- const addr_t PAINV[PAT_NUM] = - { - 3, // UC - 4, // WC - 7, // No such - 8, // No such - 1, // WT - 5, // WP, - 0, // WB - 2, // UCM - }; - static inline addr_t EncodePATAsPMLFlag(addr_t pat) - { - pat = PAINV[pat]; - addr_t result = 0; - if ( pat & 0x1 ) { result |= PML_WRTHROUGH; } - if ( pat & 0x2 ) { result |= PML_NOCACHE; } - if ( pat & 0x4 ) { result |= PML_PAT; } - return result; - } - bool MapPAT(addr_t physical, addr_t mapto, int prot, addr_t mtype); - addr_t ProtectionToPMLFlags(int prot); - int PMLFlagsToProtection(addr_t flags); - } +struct PML +{ + addr_t entry[4096 / sizeof(addr_t)]; +}; + +} // namespace Sortix + +namespace Sortix { +namespace Memory { + +const addr_t PML_PRESENT = 1 << 0; +const addr_t PML_WRITABLE = 1 << 1; +const addr_t PML_USERSPACE = 1 << 2; +const addr_t PML_WRTHROUGH = 1 << 3; +const addr_t PML_NOCACHE = 1 << 4; +const addr_t PML_PAT = 1 << 7; +const addr_t PML_AVAILABLE1 = 1 << 9; +const addr_t PML_AVAILABLE2 = 1 << 10; +const addr_t PML_AVAILABLE3 = 1 << 11; +const addr_t PML_FORK = PML_AVAILABLE1; +const addr_t PML_FLAGS = 0xFFFUL; // Bits used for the flags. +const addr_t PML_ADDRESS = ~0xFFFUL; // Bits used for the address. +const addr_t PAT_UC = 0x00; // Uncacheable +const addr_t PAT_WC = 0x01; // Write-Combine +const addr_t PAT_WT = 0x04; // Writethrough +const addr_t PAT_WP = 0x05; // Write-Protect +const addr_t PAT_WB = 0x06; // Writeback +const addr_t PAT_UCM = 0x07; // Uncacheable, overruled by MTRR. +const addr_t PAT_NUM = 0x08; + +// Desired PAT-Register PA-Field Indexing (different from BIOS defaults) +const addr_t PA[PAT_NUM] = +{ + PAT_WB, + PAT_WT, + PAT_UCM, + PAT_UC, + PAT_WC, + PAT_WP, + 0, + 0, +}; + +// Inverse function of the above. +const addr_t PAINV[PAT_NUM] = +{ + 3, // UC + 4, // WC + 7, // No such + 8, // No such + 1, // WT + 5, // WP, + 0, // WB + 2, // UCM +}; + +static inline addr_t EncodePATAsPMLFlag(addr_t pat) +{ + pat = PAINV[pat]; + addr_t result = 0; + if ( pat & 0x1 ) { result |= PML_WRTHROUGH; } + if ( pat & 0x2 ) { result |= PML_NOCACHE; } + if ( pat & 0x4 ) { result |= PML_PAT; } + return result; } +bool MapPAT(addr_t physical, addr_t mapto, int prot, addr_t mtype); +addr_t ProtectionToPMLFlags(int prot); +int PMLFlagsToProtection(addr_t flags); + +} // namespace Memory +} // namespace Sortix + #if defined(__i386__) #include "../x86/memorymanagement.h" #elif defined(__x86_64__) diff --git a/kernel/x86/memorymanagement.cpp b/kernel/x86/memorymanagement.cpp index bdc6dc37..ec8f3b4b 100644 --- a/kernel/x86/memorymanagement.cpp +++ b/kernel/x86/memorymanagement.cpp @@ -1,6 +1,6 @@ /******************************************************************************* - Copyright(C) Jonas 'Sortie' Termansen 2011, 2012. + Copyright(C) Jonas 'Sortie' Termansen 2011, 2012, 2014. This file is part of Sortix. @@ -32,167 +32,172 @@ #include "multiboot.h" -namespace Sortix +namespace Sortix { +namespace Page { + +extern size_t stackused; +extern size_t stacklength; +void ExtendStack(); + +} // namespace Page +} // namespace Sortix + +namespace Sortix { +namespace Memory { + +extern addr_t currentdir; + +void InitCPU() { - namespace Page + PML* const BOOTPML2 = (PML* const) 0x11000UL; + PML* const BOOTPML1 = (PML* const) 0x12000UL; + //PML* const FORKPML1 = (PML* const) 0x13000UL; + PML* const IDENPML1 = (PML* const) 0x14000UL; + + // Initialize the memory structures with zeroes. + memset((PML* const) 0x11000UL, 0, 0x6000UL); + + // Identity map the first 4 MiB. 
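+ // (One PML1 of ENTRIES entries, 1024 on x86 per TRANSBITS = 10, maps
+ // 1024 * 4 KiB = 4 MiB, so a single table covers the whole range.)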
+ addr_t flags = PML_PRESENT | PML_WRITABLE;
+
+ BOOTPML2->entry[0] = ((addr_t) IDENPML1) | flags;
+
+ for ( size_t i = 0; i < ENTRIES; i++ )
+ IDENPML1->entry[i] = (i * 4096UL) | flags;
+
+ // Next order of business is to map the virtual memory structures
+ // to the pre-defined locations in the virtual address space.
+
+ // Fractal map the PML1s.
+ BOOTPML2->entry[1023] = (addr_t) BOOTPML2 | flags;
+
+ // Fractal map the PML2s.
+ BOOTPML2->entry[1022] = (addr_t) BOOTPML1 | flags | PML_FORK;
+ BOOTPML1->entry[1023] = (addr_t) BOOTPML2 | flags;
+
+ // Add some predefined room for forking address spaces.
+ BOOTPML1->entry[0] = 0; // (addr_t) FORKPML1 | flags | PML_FORK;
+
+ // The virtual memory structures are now available at the predefined
+ // locations. This means the virtual memory code is bootstrapped. Of
+ // course, we still have no physical page allocator, so that's the
+ // next step.
+
+ PML* const PHYSPML1 = (PML* const) 0x15000UL;
+ PML* const PHYSPML0 = (PML* const) 0x16000UL;
+
+ BOOTPML2->entry[1021] = (addr_t) PHYSPML1 | flags;
+ PHYSPML1->entry[0] = (addr_t) PHYSPML0 | flags;
+
+ // Alright, enable virtual memory!
+ SwitchAddressSpace((addr_t) BOOTPML2);
+
+ size_t cr0;
+ asm volatile("mov %%cr0, %0": "=r"(cr0));
+ cr0 |= 0x80000000UL; /* Enable paging! */
+ asm volatile("mov %0, %%cr0":: "r"(cr0));
+
+ Page::stackused = 0;
+ Page::stacklength = 4096UL / sizeof(addr_t);
+
+ // The physical memory allocator should now be ready for use. Next
+ // up, the calling function will fill up the physical allocator with
+ // plenty of nice physical pages. (see Page::InitPushRegion)
+}
+
+// Please note that even if this function exists, you should still clean
+// up the address space of a process _before_ calling
+// DestroyAddressSpace. This is just a hack because it is currently
+// impossible to clean up PML1s using the MM API!
+// ---
+// TODO: This function is duplicated in {x86,x64}/memorymanagement.cpp!
+// ---
+void RecursiveFreeUserspacePages(size_t level, size_t offset)
+{
+ PML* pml = PMLS[level] + offset;
+ for ( size_t i = 0; i < ENTRIES; i++ )
 {
- extern size_t stackused;
- extern size_t stacklength;
- void ExtendStack();
- }
-
- namespace Memory
- {
- extern addr_t currentdir;
-
- void InitCPU()
- {
- PML* const BOOTPML2 = (PML* const) 0x11000UL;
- PML* const BOOTPML1 = (PML* const) 0x12000UL;
- //PML* const FORKPML1 = (PML* const) 0x13000UL;
- PML* const IDENPML1 = (PML* const) 0x14000UL;
-
- // Initialize the memory structures with zeroes.
- memset((PML* const) 0x11000UL, 0, 0x6000UL);
-
- // Identity map the first 4 MiB.
- addr_t flags = PML_PRESENT | PML_WRITABLE;
-
- BOOTPML2->entry[0] = ((addr_t) IDENPML1) | flags;
-
- for ( size_t i = 0; i < ENTRIES; i++ )
- {
- IDENPML1->entry[i] = (i * 4096UL) | flags;
- }
-
- // Next order of business is to map the virtual memory structures
- // to the pre-defined locations in the virtual address space.
-
- // Fractal map the PML1s.
- BOOTPML2->entry[1023] = (addr_t) BOOTPML2 | flags;
-
- // Fractal map the PML2s.
- BOOTPML2->entry[1022] = (addr_t) BOOTPML1 | flags | PML_FORK;
- BOOTPML1->entry[1023] = (addr_t) BOOTPML2 | flags;
-
- // Add some predefined room for forking address spaces.
- BOOTPML1->entry[0] = 0; // (addr_t) FORKPML1 | flags | PML_FORK;
-
- // The virtual memory structures are now available on the predefined
- // locations. This means the virtual memory code is bootstrapped. Of
- // course, we still have no physical page allocator, so that's the
- // next step.
- - PML* const PHYSPML1 = (PML* const) 0x15000UL; - PML* const PHYSPML0 = (PML* const) 0x16000UL; - - BOOTPML2->entry[1021] = (addr_t) PHYSPML1 | flags; - PHYSPML1->entry[0] = (addr_t) PHYSPML0 | flags; - - // Alright, enable virtual memory! - SwitchAddressSpace((addr_t) BOOTPML2); - - size_t cr0; - asm volatile("mov %%cr0, %0": "=r"(cr0)); - cr0 |= 0x80000000UL; /* Enable paging! */ - asm volatile("mov %0, %%cr0":: "r"(cr0)); - - Page::stackused = 0; - Page::stacklength = 4096UL / sizeof(addr_t); - - // The physical memory allocator should now be ready for use. Next - // up, the calling function will fill up the physical allocator with - // plenty of nice physical pages. (see Page::InitPushRegion) - } - - // Please note that even if this function exists, you should still clean - // up the address space of a process _before_ calling - // DestroyAddressSpace. This is just a hack because it currently is - // impossible to clean up PLM1's using the MM api! - // --- - // TODO: This function is duplicated in {x86,x64}/memorymanagement.cpp! - // --- - void RecursiveFreeUserspacePages(size_t level, size_t offset) - { - PML* pml = PMLS[level] + offset; - for ( size_t i = 0; i < ENTRIES; i++ ) - { - addr_t entry = pml->entry[i]; - if ( !(entry & PML_PRESENT) ) { continue; } - if ( !(entry & PML_USERSPACE) ) { continue; } - if ( !(entry & PML_FORK) ) { continue; } - if ( level > 1 ) { RecursiveFreeUserspacePages(level-1, offset * ENTRIES + i); } - addr_t addr = pml->entry[i] & PML_ADDRESS; - // No need to unmap the page, we just need to mark it as unused. - Page::PutUnlocked(addr); - } - } - - void DestroyAddressSpace(addr_t fallback, void (*func)(addr_t, void*), void* user) - { - // Look up the last few entries used for the fractal mapping. These - // cannot be unmapped as that would destroy the world. Instead, we - // will remember them, switch to another adress space, and safely - // mark them as unused. Also handling the forking related pages. - addr_t fractal1 = PMLS[2]->entry[1022]; - addr_t dir = currentdir; - - // We want to free the pages, but we are still using them ourselves, - // so lock the page allocation structure until we are done. - Page::Lock(); - - // In case any pages wasn't cleaned at this point. - // TODO: Page::Put calls may internally Page::Get and then reusing pages we are not done with just yet - RecursiveFreeUserspacePages(TOPPMLLEVEL, 0); - - // Switch to the address space from when the world was originally - // created. It should contain the kernel, the whole kernel, and - // nothing but the kernel. - PML* const BOOTPML2 = (PML* const) 0x11000UL; - if ( !fallback ) - fallback = (addr_t) BOOTPML2; - - if ( func ) - func(fallback, user); - else - SwitchAddressSpace(fallback); - - // Ok, now we got marked everything left behind as unused, we can - // now safely let another thread use the pages. - Page::Unlock(); - - // These are safe to free since we switched address space. - Page::Put(fractal1 & PML_ADDRESS); - Page::Put(dir & PML_ADDRESS); - } - - const size_t KERNEL_STACK_SIZE = 256UL * 1024UL; - const addr_t KERNEL_STACK_END = 0x80001000UL; - const addr_t KERNEL_STACK_START = KERNEL_STACK_END + KERNEL_STACK_SIZE; - - const addr_t VIRTUAL_AREA_LOWER = KERNEL_STACK_START; - const addr_t VIRTUAL_AREA_UPPER = 0xFF400000UL; - - void GetKernelVirtualArea(addr_t* from, size_t* size) - { - *from = KERNEL_STACK_END; - *size = VIRTUAL_AREA_UPPER - VIRTUAL_AREA_LOWER; - } - - void GetUserVirtualArea(uintptr_t* from, size_t* size) - { - *from = 0x400000; // 4 MiB. 
- *size = 0x80000000 - *from; // 2 GiB - 4 MiB.
- }
-
- addr_t GetKernelStack()
- {
- return KERNEL_STACK_START;
- }
-
- size_t GetKernelStackSize()
- {
- return KERNEL_STACK_SIZE;
- }
+ addr_t entry = pml->entry[i];
+ if ( !(entry & PML_PRESENT) )
+ continue;
+ if ( !(entry & PML_USERSPACE) )
+ continue;
+ if ( !(entry & PML_FORK) )
+ continue;
+ if ( 1 < level )
+ RecursiveFreeUserspacePages(level-1, offset * ENTRIES + i);
+ addr_t addr = pml->entry[i] & PML_ADDRESS;
+ // No need to unmap the page, we just need to mark it as unused.
+ Page::PutUnlocked(addr);
 }
}
+
+void DestroyAddressSpace(addr_t fallback, void (*func)(addr_t, void*), void* user)
+{
+ // Look up the last few entries used for the fractal mapping. These
+ // cannot be unmapped as that would destroy the world. Instead, we
+ // will remember them, switch to another address space, and safely
+ // mark them as unused. We also handle the fork-related pages.
+ addr_t fractal1 = PMLS[2]->entry[1022];
+ addr_t dir = currentdir;
+
+ // We want to free the pages, but we are still using them ourselves,
+ // so lock the page allocation structure until we are done.
+ Page::Lock();
+
+ // In case any pages weren't cleaned up at this point.
+ // TODO: Page::Put calls may internally call Page::Get and then reuse pages we are not done with yet.
+ RecursiveFreeUserspacePages(TOPPMLLEVEL, 0);
+
+ // Switch to the address space from when the world was originally
+ // created. It should contain the kernel, the whole kernel, and
+ // nothing but the kernel.
+ PML* const BOOTPML2 = (PML* const) 0x11000UL;
+ if ( !fallback )
+ fallback = (addr_t) BOOTPML2;
+
+ if ( func )
+ func(fallback, user);
+ else
+ SwitchAddressSpace(fallback);
+
+ // OK, now that we have marked everything left behind as unused, we
+ // can safely let another thread use the pages.
+ Page::Unlock();
+
+ // These are safe to free since we switched address space.
+ Page::Put(fractal1 & PML_ADDRESS);
+ Page::Put(dir & PML_ADDRESS);
+}
+
+const size_t KERNEL_STACK_SIZE = 256UL * 1024UL;
+const addr_t KERNEL_STACK_END = 0x80001000UL;
+const addr_t KERNEL_STACK_START = KERNEL_STACK_END + KERNEL_STACK_SIZE;
+
+const addr_t VIRTUAL_AREA_LOWER = KERNEL_STACK_START;
+const addr_t VIRTUAL_AREA_UPPER = 0xFF400000UL;
+
+void GetKernelVirtualArea(addr_t* from, size_t* size)
+{
+ *from = KERNEL_STACK_END;
+ *size = VIRTUAL_AREA_UPPER - VIRTUAL_AREA_LOWER;
+}
+
+void GetUserVirtualArea(uintptr_t* from, size_t* size)
+{
+ *from = 0x400000; // 4 MiB.
+ *size = 0x80000000 - *from; // 2 GiB - 4 MiB.
+}
+
+addr_t GetKernelStack()
+{
+ return KERNEL_STACK_START;
+}
+
+size_t GetKernelStackSize()
+{
+ return KERNEL_STACK_SIZE;
+}
+
+} // namespace Memory
+} // namespace Sortix
diff --git a/kernel/x86/memorymanagement.h b/kernel/x86/memorymanagement.h
index c40391ec..1cf874d3 100644
--- a/kernel/x86/memorymanagement.h
+++ b/kernel/x86/memorymanagement.h
@@ -1,6 +1,6 @@
 /*******************************************************************************
- Copyright(C) Jonas 'Sortie' Termansen 2011.
+ Copyright(C) Jonas 'Sortie' Termansen 2011, 2014.
 This file is part of Sortix.
@@ -25,30 +25,33 @@ #ifndef SORTIX_X64_MEMORYMANAGEMENT_H #define SORTIX_X64_MEMORYMANAGEMENT_H -namespace Sortix +namespace Sortix { +namespace Memory { + +const size_t TOPPMLLEVEL = 2; +const size_t ENTRIES = 4096UL / sizeof(addr_t); +const size_t TRANSBITS = 10; + +PML* const PMLS[TOPPMLLEVEL + 1] = { - namespace Memory - { - const size_t TOPPMLLEVEL = 2; - const size_t ENTRIES = 4096UL / sizeof(addr_t); - const size_t TRANSBITS = 10; + (PML* const) 0x0, + (PML* const) 0xFFC00000UL, + (PML* const) 0xFFBFF000UL, +}; - PML* const PMLS[TOPPMLLEVEL + 1] = - { - (PML* const) 0x0, - (PML* const) 0xFFC00000UL, - (PML* const) 0xFFBFF000UL, - }; +PML* const FORKPML = (PML* const) 0xFF800000UL; - PML* const FORKPML = (PML* const) 0xFF800000UL; - } +} // namespace Memory +} // namespace Sortix - namespace Page - { - addr_t* const STACK = (addr_t* const) 0xFF400000UL; - const size_t MAXSTACKSIZE = (4UL*1024UL*1024UL); - const size_t MAXSTACKLENGTH = MAXSTACKSIZE / sizeof(addr_t); - } -} +namespace Sortix { +namespace Page { + +addr_t* const STACK = (addr_t* const) 0xFF400000UL; +const size_t MAXSTACKSIZE = (4UL*1024UL*1024UL); +const size_t MAXSTACKLENGTH = MAXSTACKSIZE / sizeof(addr_t); + +} // namespace Page +} // namespace Sortix #endif
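
Note on the fractal-mapping arithmetic used throughout these files: with the
x86 constants above (TOPPMLLEVEL = 2, ENTRIES = 1024, TRANSBITS = 10, and the
PMLS windows at 0xFFC00000/0xFFBFF000), a virtual address splits into one table
index per PML level, and the page tables map themselves through those windows.
The stand-alone sketch below is not part of the patch; it only illustrates that
index math in user space, and the main() harness, constant mirrors, and printf
output are illustrative assumptions rather than kernel code.

	#include <cstddef>
	#include <cstdint>
	#include <cstdio>

	// Mirrors of the x86 constants in kernel/x86/memorymanagement.h
	// (illustration only; not kernel code).
	static const size_t TOPPMLLEVEL = 2;
	static const size_t TRANSBITS = 10;
	static const size_t ENTRIES = 1UL << TRANSBITS;    // 1024 entries per table
	static const uint32_t PML1_WINDOW = 0xFFC00000UL;  // PMLS[1]

	int main()
	{
		uint32_t mapto = 0x00400123; // an arbitrary user-space address
		size_t pmlchildid[TOPPMLLEVEL + 1];

		// Same decomposition as Map()/Unmap(): bits 22..31 pick the PML2
		// entry, bits 12..21 pick the PML1 entry.
		for ( size_t i = 1; i <= TOPPMLLEVEL; i++ )
			pmlchildid[i] = mapto >> (12 + (i-1) * TRANSBITS) & (ENTRIES - 1);

		// The PML1 that maps this address is itself visible through the
		// fractal window, one 4 KiB table per PML2 slot.
		uint32_t pml1_at = PML1_WINDOW + (uint32_t) pmlchildid[TOPPMLLEVEL] * 4096UL;

		std::printf("PML2 index %zu, PML1 index %zu, that PML1 is mapped at %#lx\n",
		            pmlchildid[TOPPMLLEVEL], pmlchildid[1], (unsigned long) pml1_at);
		return 0;
	}

For the example address 0x00400123 this prints PML2 index 1, PML1 index 0, and
0xffc01000 as the fractal address of the covering PML1, which is the same walk
Unmap() performs when it accumulates offset = offset * ENTRIES + childid.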