1
0
Fork 0
mirror of https://github.com/tailix/kernel.git synced 2024-10-30 12:03:52 -04:00
kernel/kernel/paging.c

109 lines
2.8 KiB
C
Raw Normal View History

#include "paging.h"
2017-11-07 23:54:47 -05:00
#include "panic.h"
2017-11-05 01:14:55 -05:00
2021-12-14 21:10:49 -05:00
#include <kernaux/libc.h>
2020-11-29 20:03:45 -05:00
#include <kernaux/stdlib.h>
2020-11-27 09:05:20 -05:00
2021-12-18 20:39:38 -05:00
// Install a single 4MB page directory entry mapping `virt` to `phys`.
// Forward declaration; defined at the bottom of this file.
// NOTE(review): `pfa` appears unused by the definition below — presumably
// reserved for future page-frame allocation; confirm.
static void mapping(
    struct Paging *paging,
    KernAux_PFA pfa,
    uint32_t virt,
    uint32_t phys
);
2021-12-18 20:25:11 -05:00
2021-12-18 15:26:49 -05:00
// Point CR3 at this paging structure's page directory so the CPU
// uses it for address translation once paging is enabled.
void paging_load(struct Paging *const paging)
{
    kernaux_arch_i386_write_cr3((uint32_t)&paging->page_dir);
}
2017-11-04 10:59:01 -04:00
void paging_enable()
{
2021-12-17 22:44:19 -05:00
uint32_t cr0 = kernaux_arch_i386_read_cr0();
uint32_t cr4 = kernaux_arch_i386_read_cr4();
2017-11-04 10:59:01 -04:00
assert(cr0 & KERNAUX_ARCH_I386_CR0_PE, "The boot loader should have put us in protected mode.");
2017-11-04 10:59:01 -04:00
// First clear PG and PGE flag, as PGE must be enabled after PG.
kernaux_arch_i386_write_cr0(cr0 & ~KERNAUX_ARCH_I386_CR0_PG);
kernaux_arch_i386_write_cr4(cr4 & ~(KERNAUX_ARCH_I386_CR4_PGE | KERNAUX_ARCH_I386_CR4_PSE));
2017-11-04 10:59:01 -04:00
cr0 = kernaux_arch_i386_read_cr0();
cr4 = kernaux_arch_i386_read_cr4();
2017-11-04 10:59:01 -04:00
// Our page table contains 4MB entries.
cr4 |= KERNAUX_ARCH_I386_CR4_PSE;
2017-11-04 10:59:01 -04:00
kernaux_arch_i386_write_cr4(cr4);
2017-11-04 10:59:01 -04:00
// First enable paging, then enable global page flag.
cr0 |= KERNAUX_ARCH_I386_CR0_PG;
2017-11-04 10:59:01 -04:00
kernaux_arch_i386_write_cr0(cr0);
2017-11-04 10:59:01 -04:00
cr0 |= KERNAUX_ARCH_I386_CR0_WP;
2017-11-04 10:59:01 -04:00
kernaux_arch_i386_write_cr0(cr0);
kernaux_arch_i386_write_cr4(cr4);
2017-11-04 10:59:01 -04:00
}
2021-12-18 00:19:44 -05:00
void paging_clear(struct Paging *const paging)
{
2021-12-18 15:33:18 -05:00
memset(paging, 0, sizeof(*paging));
}
2021-12-18 20:37:48 -05:00
void paging_identity(struct Paging *const paging, const KernAux_PFA pfa)
{
2021-12-17 22:44:19 -05:00
for (size_t i = 0; i < PAGE_DIR_LENGTH; ++i) {
2021-12-18 20:25:11 -05:00
const size_t addr = i * PAGE_BIG_SIZE;
2021-12-18 20:39:38 -05:00
mapping(paging, pfa, addr, addr);
}
}
2021-12-18 00:19:44 -05:00
void paging_mapkernel(
struct Paging *const paging,
2021-12-18 20:37:48 -05:00
const KernAux_PFA pfa,
2021-12-18 00:19:44 -05:00
const struct Kernel_Info *const kinfo
) {
2017-11-07 23:54:47 -05:00
assert(!(kinfo->kernel_phys_base % PAGE_BIG_SIZE), "Kernel physical address is not aligned.");
assert(!(kinfo->kernel_virt_base % PAGE_BIG_SIZE), "Kernel virtual address is not aligned.");
2021-12-18 20:25:11 -05:00
size_t phys = kinfo->kernel_phys_base;
size_t virt = kinfo->kernel_virt_base;
2021-12-17 22:44:19 -05:00
size_t mapped = 0;
while (mapped < kinfo->kernel_size) {
2021-12-18 20:39:38 -05:00
mapping(paging, pfa, virt, phys);
2021-12-18 20:25:11 -05:00
phys += PAGE_BIG_SIZE;
virt += PAGE_BIG_SIZE;
2017-11-05 01:14:55 -05:00
mapped += PAGE_BIG_SIZE;
2021-12-18 20:25:11 -05:00
}
}
2021-12-18 20:25:11 -05:00
void mapping(
struct Paging *const paging,
2021-12-18 20:39:38 -05:00
const KernAux_PFA pfa,
2021-12-18 20:25:11 -05:00
const uint32_t virt,
const uint32_t phys
) {
KERNAUX_NOTNULL_RETURN(paging);
const size_t pde_index = virt / PAGE_BIG_SIZE;
struct KernAux_Arch_I386_PDE *const pde = &paging->page_dir[pde_index];
if (!pde->present) {
pde->addr = PAGE_DIR_ADDR(phys);
pde->available1 = 0;
pde->page_size = 1;
pde->available0 = 0;
pde->accessed = 0;
pde->cache_disabled = 0;
pde->write_through = 0;
pde->user = 0;
pde->writable = 1;
pde->present = 1;
}
}