2017-11-04 14:31:24 +00:00
|
|
|
#include "paging.h"
|
2017-11-08 04:54:47 +00:00
|
|
|
#include "panic.h"
|
2017-11-05 06:14:55 +00:00
|
|
|
|
2021-12-20 06:14:07 +00:00
|
|
|
#include <kernaux/asm/i386.h>
|
2022-06-24 05:50:07 +00:00
|
|
|
|
|
|
|
#include <string.h>
|
2020-11-27 14:05:20 +00:00
|
|
|
|
2023-01-05 15:35:45 +00:00
|
|
|
extern uint8_t _kernel_size;
|
|
|
|
extern uint8_t _kernel_phys_base;
|
|
|
|
extern uint8_t _kernel_virt_base;
|
|
|
|
|
2021-12-19 02:13:51 +00:00
|
|
|
static void mapping(struct Paging *paging, uint32_t virt, uint32_t phys);
|
2021-12-19 01:25:11 +00:00
|
|
|
|
2021-12-18 20:26:49 +00:00
|
|
|
void paging_load(struct Paging *const paging)
|
|
|
|
{
|
|
|
|
uint32_t page_dir_phys = (uint32_t)&paging->page_dir;
|
2021-12-20 06:14:07 +00:00
|
|
|
kernaux_asm_i386_write_cr3(page_dir_phys);
|
2021-12-18 20:26:49 +00:00
|
|
|
}
|
|
|
|
|
2017-11-04 14:59:01 +00:00
|
|
|
// Turn on paging via the CR0/CR4 control registers.
//
// The write order matters: PG and PGE are first cleared, paging is then
// enabled (PG), and only afterwards may the global-page flag be set —
// hence the read-modify-write dance below.
void paging_enable()
{
    uint32_t cr0 = kernaux_asm_i386_read_cr0();
    uint32_t cr4 = kernaux_asm_i386_read_cr4();

    // We rely on the boot protocol: protected mode (CR0.PE) must already
    // be active before paging can be configured.
    assert(cr0 & KERNAUX_ARCH_X86_CR0_PE, "The boot loader should have put us in protected mode.");

    // First clear PG and PGE flag, as PGE must be enabled after PG.
    kernaux_asm_i386_write_cr0(cr0 & ~KERNAUX_ARCH_X86_CR0_PG);
    kernaux_asm_i386_write_cr4(cr4 & ~(KERNAUX_ARCH_X86_CR4_PGE | KERNAUX_ARCH_X86_CR4_PSE));

    // Re-read the registers so the local copies reflect the cleared flags.
    cr0 = kernaux_asm_i386_read_cr0();
    cr4 = kernaux_asm_i386_read_cr4();

    // Our page table contains 4MB entries.
    // cr4 |= KERNAUX_ARCH_X86_CR4_PSE;

    kernaux_asm_i386_write_cr4(cr4);

    // First enable paging, then enable global page flag.
    // NOTE(review): despite this comment, PGE is never actually set
    // anywhere below, even though the PTEs are created with global = 1
    // (see mapping()) — confirm whether CR4.PGE should be enabled here.
    cr0 |= KERNAUX_ARCH_X86_CR0_PG;
    kernaux_asm_i386_write_cr0(cr0);

    // Enable write protection so the kernel itself honors read-only pages.
    cr0 |= KERNAUX_ARCH_X86_CR0_WP;
    kernaux_asm_i386_write_cr0(cr0);
    // NOTE(review): this final CR4 write stores the same value already
    // written above — it looks redundant; verify it isn't a placeholder
    // for the missing PGE enable.
    kernaux_asm_i386_write_cr4(cr4);
}
|
|
|
|
|
2021-12-18 05:19:44 +00:00
|
|
|
// Reset the paging structures: zero every byte, which marks all page
// directory and page table entries as "not present".
void paging_clear(struct Paging *const paging)
{
    memset(paging, 0, sizeof(*paging));
}
|
|
|
|
|
2021-12-19 02:13:51 +00:00
|
|
|
void paging_identity(struct Paging *const paging)
|
2017-11-04 14:31:24 +00:00
|
|
|
{
|
2021-12-21 05:19:17 +00:00
|
|
|
for (size_t i = 0; i < KERNAUX_ARCH_I386_PAGES_COUNT_MAX; ++i) {
|
|
|
|
const size_t addr = i * KERNAUX_ARCH_I386_PAGE_SIZE;
|
2021-12-19 02:13:51 +00:00
|
|
|
mapping(paging, addr, addr);
|
2017-11-04 14:31:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-05 15:35:45 +00:00
|
|
|
void paging_mapkernel(struct Paging *const paging)
|
|
|
|
{
|
|
|
|
assert(!((size_t)&_kernel_phys_base % KERNAUX_ARCH_I386_PAGE_SIZE), "Kernel physical address is not aligned.");
|
|
|
|
assert(!((size_t)&_kernel_virt_base % KERNAUX_ARCH_I386_PAGE_SIZE), "Kernel virtual address is not aligned.");
|
2017-11-04 14:31:24 +00:00
|
|
|
|
2023-01-05 15:35:45 +00:00
|
|
|
size_t phys = (size_t)&_kernel_phys_base;
|
|
|
|
size_t virt = (size_t)&_kernel_virt_base;
|
2021-12-18 03:44:19 +00:00
|
|
|
size_t mapped = 0;
|
2017-11-04 14:31:24 +00:00
|
|
|
|
2023-01-05 15:35:45 +00:00
|
|
|
while (mapped < (size_t)&_kernel_size) {
|
2021-12-19 02:13:51 +00:00
|
|
|
mapping(paging, virt, phys);
|
2017-11-04 14:31:24 +00:00
|
|
|
|
2021-12-21 05:19:17 +00:00
|
|
|
phys += KERNAUX_ARCH_I386_PAGE_SIZE;
|
|
|
|
virt += KERNAUX_ARCH_I386_PAGE_SIZE;
|
|
|
|
mapped += KERNAUX_ARCH_I386_PAGE_SIZE;
|
2021-12-19 01:25:11 +00:00
|
|
|
}
|
|
|
|
}
|
2017-11-04 14:31:24 +00:00
|
|
|
|
2021-12-19 01:25:11 +00:00
|
|
|
void mapping(
|
|
|
|
struct Paging *const paging,
|
|
|
|
const uint32_t virt,
|
|
|
|
const uint32_t phys
|
|
|
|
) {
|
2022-12-22 16:01:29 +00:00
|
|
|
assert(paging, "paging");
|
2021-12-19 01:25:11 +00:00
|
|
|
|
2021-12-21 05:43:59 +00:00
|
|
|
const size_t pde_index = KERNAUX_ARCH_I386_ADDR_TO_PDE_INDEX(virt);
|
|
|
|
const size_t pte_index = KERNAUX_ARCH_I386_ADDR_TO_PTE_INDEX(virt);
|
2021-12-20 04:20:34 +00:00
|
|
|
|
|
|
|
struct KernAux_Arch_I386_PageDir *const page_dir = &paging->page_dir;
|
|
|
|
struct KernAux_Arch_I386_PageTable *const page_table = &paging->page_tables[pde_index];
|
|
|
|
|
2022-12-07 19:29:46 +00:00
|
|
|
union KernAux_Arch_I386_PDE *const pde = &page_dir->pdes[pde_index];
|
|
|
|
union KernAux_Arch_I386_PTE *const pte = &page_table->ptes[pte_index];
|
|
|
|
|
|
|
|
if (!pde->bitfields.present) {
|
|
|
|
pde->bitfields.addr = KERNAUX_ARCH_I386_ADDR_TO_PDE_ADDR(page_table);
|
|
|
|
|
|
|
|
pde->bitfields.available1 = 0;
|
|
|
|
pde->bitfields.page_size = 0;
|
|
|
|
pde->bitfields.available0 = 0;
|
|
|
|
pde->bitfields.accessed = 0;
|
|
|
|
pde->bitfields.cache_disabled = 0;
|
|
|
|
pde->bitfields.write_through = 0;
|
|
|
|
pde->bitfields.user = 0;
|
|
|
|
pde->bitfields.writable = 1;
|
|
|
|
pde->bitfields.present = 1;
|
2017-11-04 14:31:24 +00:00
|
|
|
}
|
2021-12-20 04:20:34 +00:00
|
|
|
|
2022-12-07 19:29:46 +00:00
|
|
|
pte->bitfields.addr = KERNAUX_ARCH_I386_ADDR_TO_PTE_ADDR(phys);
|
|
|
|
|
|
|
|
pte->bitfields.available = 1;
|
|
|
|
pte->bitfields.global = 1;
|
|
|
|
pte->bitfields.attr_table = 0;
|
|
|
|
pte->bitfields.dirty = 0;
|
|
|
|
pte->bitfields.accessed = 0;
|
|
|
|
pte->bitfields.cache_disabled = 0;
|
|
|
|
pte->bitfields.write_through = 0;
|
|
|
|
pte->bitfields.user = 0;
|
|
|
|
pte->bitfields.writable = 1;
|
|
|
|
pte->bitfields.present = 1;
|
2017-11-04 14:31:24 +00:00
|
|
|
}
|