1
0
Fork 0
mirror of https://gitlab.com/sortix/sortix.git synced 2023-02-13 20:55:38 -05:00

Added support for MSRs, PAT and MTRRs.

This provides control over the caching of memory, which makes write-combined
IO possible. Graphics drivers can use this to transfer data at a much higher
rate to the video memory.

The implementation is a bit hacky but it'll do for now. It provides enough
support for the experimental VBE driver to work on the real computers I
tested it on, even if the BIOS uses screwed up default MTRRs.

The virtual memory layer now automatically uses the PAT feature if available
but in a backwards compatible manner and otherwise just tries to approximate
PAT features if they are asked for.
This commit is contained in:
Jonas 'Sortie' Termansen 2012-07-01 00:50:27 +02:00
parent aac12add54
commit 2046482e77
5 changed files with 404 additions and 5 deletions

View file

@ -41,6 +41,7 @@ ifdef X86FAMILY
$(CPU)/thread.o \
$(CPU)/scheduler.o \
$(CPU)/process.o \
x86-family/msr.o \
x86-family/x86-family.o
CPUFLAGS:=$(CPUFLAGS) -mno-mmx -mno-sse -mno-sse2 -mno-sse3 -mno-3dnow
endif

View file

@ -30,6 +30,7 @@
#include <sortix/kernel/memorymanagement.h>
#include "memorymanagement.h"
#include "syscall.h"
#include "msr.h"
using namespace Maxsi;
@ -53,6 +54,7 @@ namespace Sortix
void InitCPU();
void AllocateKernelPMLs();
int SysMemStat(size_t* memused, size_t* memtotal);
addr_t PAT2PMLFlags[PAT_NUM];
void Init(multiboot_info_t* bootinfo)
{
@ -76,6 +78,29 @@ namespace Sortix
"specification compliant?");
}
// If supported, setup the Page Attribute Table feature that allows
// us to control the memory type (caching) of memory more precisely.
if ( MSR::IsPATSupported() )
{
MSR::InitializePAT();
for ( addr_t i = 0; i < PAT_NUM; i++ )
PAT2PMLFlags[i] = EncodePATAsPMLFlag(i);
}
// Otherwise, reroute all requests to the backwards compatible
// scheme. TODO: Not all early 32-bit x86 CPUs support these
// values, so we need yet another fallback.
else
{
PAT2PMLFlags[PAT_UC] = PML_WRTHROUGH | PML_NOCACHE;
PAT2PMLFlags[PAT_WC] = PML_WRTHROUGH | PML_NOCACHE; // Approx.
PAT2PMLFlags[2] = 0; // No such flag.
PAT2PMLFlags[3] = 0; // No such flag.
PAT2PMLFlags[PAT_WT] = PML_WRTHROUGH;
PAT2PMLFlags[PAT_WP] = PML_WRTHROUGH; // Approx.
PAT2PMLFlags[PAT_WB] = 0;
PAT2PMLFlags[PAT_UCM] = PML_NOCACHE;
}
// Initialize CPU-specific things.
InitCPU();
@ -378,7 +403,7 @@ namespace Sortix
}
template <bool userspace, bool invalidate>
bool Map(addr_t physical, addr_t mapto)
bool Map(addr_t physical, addr_t mapto, addr_t extraflags = 0)
{
const addr_t userflags = userspace ? (PML_USERSPACE | PML_FORK) : 0;
const addr_t flags = userflags | PML_PRESENT | PML_WRITABLE;
@ -408,7 +433,7 @@ namespace Sortix
// TODO: Possible memory leak when page allocation fails.
addr_t page = Page::Get();
if ( page == 0 ) { return false; }
entry = page | flags;
entry = page | flags | extraflags;
// Invalidate the new PML and reset it to zeroes.
addr_t pmladdr = (addr_t) (PMLS[i-1] + childoffset);
@ -427,7 +452,8 @@ namespace Sortix
}
// Actually map the physical page to the virtual page.
(PMLS[1] + offset)->entry[pmlchildid[1]] = physical | flags;
const addr_t entry = physical | flags | extraflags;
(PMLS[1] + offset)->entry[pmlchildid[1]] = entry;
if ( invalidate )
{
@ -490,14 +516,26 @@ namespace Sortix
return result;
}
// Maps the physical page at the given kernel-space virtual address,
// selecting the page table caching flags for the requested PAT memory type.
bool MapKernelPAT(addr_t physical, addr_t mapto, addr_t mtype)
{
	return Map<false, false>(physical, mapto, PAT2PMLFlags[mtype]);
}
// Maps a kernel page with the default (writeback) memory type.
bool MapKernel(addr_t physical, addr_t mapto)
{
	const addr_t mtype = PAT_WB;
	return MapKernelPAT(physical, mapto, mtype);
}
// Maps the physical page at the given user-space virtual address,
// selecting the page table caching flags for the requested PAT memory type.
bool MapUserPAT(addr_t physical, addr_t mapto, addr_t mtype)
{
	return Map<true, false>(physical, mapto, PAT2PMLFlags[mtype]);
}
// Maps a user-space page with the default (writeback) memory type.
bool MapUser(addr_t physical, addr_t mapto)
{
	const addr_t mtype = PAT_WB;
	return MapUserPAT(physical, mapto, mtype);
}
addr_t UnmapKernel(addr_t mapto)

View file

@ -37,12 +37,57 @@ namespace Sortix
// x86 page table entry flag bits.
const addr_t PML_PRESENT = (1<<0);
const addr_t PML_WRITABLE = (1<<1);
const addr_t PML_USERSPACE = (1<<2);
const addr_t PML_WRTHROUGH = (1<<3); // PWT: write-through caching.
const addr_t PML_NOCACHE = (1<<4); // PCD: cache disable.
const addr_t PML_PAT = (1<<7); // PAT selector bit (4 KiB page entries).
const addr_t PML_AVAILABLE1 = (1<<9);
const addr_t PML_AVAILABLE2 = (1<<10);
const addr_t PML_AVAILABLE3 = (1<<11);
const addr_t PML_FORK = PML_AVAILABLE1;
const addr_t PML_FLAGS = (0xFFFUL); // Bits used for the flags.
const addr_t PML_ADDRESS = (~0xFFFUL); // Bits used for the address.
// Memory type (caching) encodings as stored in the PAT MSR fields.
// Values 0x02 and 0x03 have no PAT encoding and are unused.
const addr_t PAT_UC = 0x00; // Uncacheable
const addr_t PAT_WC = 0x01; // Write-Combine
const addr_t PAT_WT = 0x04; // Writethrough
const addr_t PAT_WP = 0x05; // Write-Protect
const addr_t PAT_WB = 0x06; // Writeback
const addr_t PAT_UCM = 0x07; // Uncacheable, overruled by MTRR.
const addr_t PAT_NUM = 0x08;
// Desired PAT-Register PA-Field Indexing (different from BIOS defaults)
// PA[slot] is the memory type programmed into PAT entry 'slot' by
// MSR::InitializePAT(). The first four slots mirror the legacy PWT/PCD
// meanings (WB, WT, UC-, UC), presumably so mappings made without PAT
// awareness keep their pre-PAT caching behavior -- TODO confirm.
const addr_t PA[PAT_NUM] =
{
PAT_WB,
PAT_WT,
PAT_UCM,
PAT_UC,
PAT_WC,
PAT_WP,
0,
0,
};
// Inverse function of the above.
// PAINV[mtype] is the PAT slot whose PA[] entry is mtype. Types 2 and 3
// have no encoding; their entries (7 and the out-of-range 8) are never
// meant to be used. NOTE(review): 8 encodes to no flag bits below, i.e.
// the same as slot 0 (WB) -- looks deliberate, but verify.
const addr_t PAINV[PAT_NUM] =
{
3, // UC
4, // WC
7, // No such
8, // No such
1, // WT
5, // WP,
0, // WB
2, // UCM
};
// Translates a PAT memory type into the page table entry bits that select
// its PAT slot: slot bit 0 -> PWT, bit 1 -> PCD, bit 2 -> PAT.
static inline addr_t EncodePATAsPMLFlag(addr_t pat)
{
pat = PAINV[pat];
addr_t result = 0;
if ( pat & 0x1 ) { result |= PML_WRTHROUGH; }
if ( pat & 0x2 ) { result |= PML_NOCACHE; }
if ( pat & 0x4 ) { result |= PML_PAT; }
return result;
}
// Map a page with an explicit memory type (one of the PAT_* values).
bool MapKernelPAT(addr_t physical, addr_t mapto, addr_t mtype);
bool MapUserPAT(addr_t physical, addr_t mapto, addr_t mtype);
}
}

273
sortix/x86-family/msr.cpp Normal file
View file

@ -0,0 +1,273 @@
/*******************************************************************************
Copyright(C) Jonas 'Sortie' Termansen 2012.
Copyright(C) Free Software Foundation, Inc. 2005, 2006, 2007, 2008, 2009.
This file is part of Sortix.
Sortix is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
Sortix is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along with
Sortix. If not, see <http://www.gnu.org/licenses/>.
x86-family/msr.cpp
Functions to manipulate Model Specific Registers. MTRR code is partially
based on code from GNU GRUB.
*******************************************************************************/
#include <sortix/kernel/platform.h>
#include "memorymanagement.h"
namespace Sortix {
namespace MSR {
// CPUID leaf 1 EDX feature flags: bit 12 = MTRR, bit 16 = PAT
// (per the Intel SDM CPUID documentation).
const uint32_t bit_MTRR = 0x00001000U;
const uint32_t bit_PAT = 0x00010000U;
// TODO: Move this to a better location or use <cpuid.h>.
// Detects cpuid support by attempting to toggle the ID bit (bit 21,
// 0x200000) of EFLAGS; the bit can only be modified when the cpuid
// instruction exists. Returns true when cpuid may be used.
static inline bool IsCPUIdSupported()
{
#ifdef __x86_64__
	// TODO: Isn't this always supported under x86_64?
	uint64_t id_supported;
	asm ("pushfq\n\t"
	     "popq %%rax /* Get EFLAGS into EAX */\n\t"
	     "movq %%rax, %%rcx /* Save original flags in ECX */\n\t"
	     "xorq $0x200000, %%rax /* Flip ID bit in EFLAGS */\n\t"
	     "pushq %%rax /* Store modified EFLAGS on stack */\n\t"
	     "popfq /* Replace current EFLAGS */\n\t"
	     "pushfq /* Read back the EFLAGS */\n\t"
	     "popq %%rax /* Get EFLAGS into EAX */\n\t"
	     "xorq %%rcx, %%rax /* Check if flag could be modified */\n\t"
	     : "=a" (id_supported)
	     : /* No inputs. */
	     : /* Clobbered: */ "%rcx");
#else
	uint32_t id_supported;
	asm ("pushfl\n\t"
	     "popl %%eax /* Get EFLAGS into EAX */\n\t"
	     "movl %%eax, %%ecx /* Save original flags in ECX */\n\t"
	     "xorl $0x200000, %%eax /* Flip ID bit in EFLAGS */\n\t"
	     "pushl %%eax /* Store modified EFLAGS on stack */\n\t"
	     "popfl /* Replace current EFLAGS */\n\t"
	     "pushfl /* Read back the EFLAGS */\n\t"
	     "popl %%eax /* Get EFLAGS into EAX */\n\t"
	     "xorl %%ecx, %%eax /* Check if flag could be modified */\n\t"
	     : "=a" (id_supported)
	     : /* No inputs. */
	     // Bug fix: this branch compiles only without __x86_64__, where
	     // %rcx is not a valid register name; the clobbered register is ECX.
	     : /* Clobbered: */ "%ecx");
#endif
	return id_supported != 0;
}
// Executes cpuid with EAX = num, storing EAX/EBX/ECX/EDX in a/b/c/d.
// EBX is swapped through another register because EBX may be reserved
// (e.g. as the PIC register) and cannot always be listed as an operand.
#define cpuid(num,a,b,c,d) \
asm volatile ("xchgl %%ebx, %1; cpuid; xchgl %%ebx, %1" \
: "=a" (a), "=r" (b), "=c" (c), "=d" (d) \
: "0" (num))
// Reads MSR 'num' into a (low 32 bits) and d (high 32 bits).
#define rdmsr(num,a,d) \
asm volatile ("rdmsr" : "=a" (a), "=d" (d) : "c" (num))
// Writes lo/hi as the low/high 32 bits of MSR 'num'.
#define wrmsr(num,lo,hi) \
asm volatile ("wrmsr" : : "c" (num), "a" (lo), "d" (hi) : "memory")
// MSR numbers of the variable-range MTRR base/mask pair 'reg'
// (IA32_MTRR_PHYSBASEn = 0x200 + 2n, IA32_MTRR_PHYSMASKn = 0x201 + 2n).
#define mtrr_base(reg) (0x200 + (reg) * 2)
#define mtrr_mask(reg) (0x200 + (reg) * 2 + 1)
// Enables a variable-range MTRR by setting the valid bit (bit 11)
// in its mask MSR.
void EnableMTRR(int mtrr)
{
	uint32_t lo, hi;
	rdmsr(mtrr_mask(mtrr), lo, hi);
	lo |= 0x800 /* valid */;
	wrmsr(mtrr_mask(mtrr), lo, hi);
}
// Disables a variable-range MTRR by clearing the valid bit (bit 11)
// in its mask MSR.
void DisableMTRR(int mtrr)
{
	uint32_t lo, hi;
	rdmsr(mtrr_mask(mtrr), lo, hi);
	lo &= ~0x800 /* valid */;
	wrmsr(mtrr_mask(mtrr), lo, hi);
}
// Duplicates variable-range MTRR 'src' into 'dst' by copying both its
// base and mask MSRs.
void CopyMTRR(int dst, int src)
{
	uint32_t lo, hi;
	rdmsr(mtrr_base(src), lo, hi);
	wrmsr(mtrr_base(dst), lo, hi);
	rdmsr(mtrr_mask(src), lo, hi);
	wrmsr(mtrr_mask(dst), lo, hi);
}
// Returns whether the CPU advertises the Page Attribute Table feature
// (CPUID leaf 1, EDX).
bool IsPATSupported()
{
	if ( !IsCPUIdSupported() )
		return false;
	uint32_t eax, ebx, ecx, edx;
	cpuid(1, eax, ebx, ecx, edx);
	return (edx & bit_PAT) != 0;
}
// Programs the IA32_PAT MSR (0x277) so that each of its eight entries
// holds the memory type chosen in the Memory::PA[] table.
void InitializePAT()
{
	using namespace Sortix::Memory;
	const int PAT_REG = 0x0277;
	uint32_t lo = 0;
	uint32_t hi = 0;
	for ( int i = 0; i < 4; i++ )
	{
		lo |= (uint32_t) PA[i+0] << (8 * i);
		hi |= (uint32_t) PA[i+4] << (8 * i);
	}
	wrmsr(PAT_REG, lo, hi);
}
// Returns whether the CPU advertises Memory Type Range Registers
// (CPUID leaf 1, EDX).
bool IsMTRRSupported()
{
	if ( !IsCPUIdSupported() )
		return false;
	uint32_t eax, ebx, ecx, edx;
	cpuid(1, eax, ebx, ecx, edx);
	return (edx & bit_MTRR) != 0;
}
// TODO: Yes, returning a string as an error and giving the result in a pointer
// is very bad design. Please fix this at some point. Also improve the code such
// that it is more flexible.
// Attempts to mark the physical region [base, base + size) as write-combining
// using a single variable-range MTRR. Returns NULL on success (storing the
// MTRR index in *ret when ret is non-NULL), otherwise a string describing why
// the region could not be covered.
const char* SetupMTRRForWC(addr_t base, size_t size, int* ret)
{
	uint64_t fb_base = (uint64_t) base;
	uint64_t fb_size = (uint64_t) size;
	// Check that fb_base and fb_size can be represented using a single MTRR.
	if ( fb_base < (1 << 20) )
		return "below 1MB, so covered by fixed-range MTRRs";
	if ( fb_base >= (1LL << 36) )
		return "over 36 bits, so out of range";
	if ( fb_size < (1 << 12) )
		return "variable-range MTRRs must cover at least 4KB";
	// A single MTRR covers a power-of-two-sized region. Bug fix: the old
	// check shifted fb_size down until it reached 1, which succeeds for
	// every nonzero size and therefore never rejected anything.
	if ( fb_size & (fb_size - 1) )
		return "not a power of two";
	if ( fb_base & (fb_size - 1) )
		return "not aligned on size boundary";
	uint64_t fb_mask = ~(fb_size - 1);
	// Check CPU capabilities.
	if ( !IsCPUIdSupported() )
		return "cpuid not supported, therefore mtrr not supported";
	if ( !IsMTRRSupported() )
		return "cpu does not support mtrr";
	uint32_t eax, ebx, ecx, edx;
	// IA32_MTRRCAP (0xFE): bit 10 = WC supported, bits 7:0 = number of
	// variable-range MTRRs.
	rdmsr(0xFE, eax, edx);
	uint32_t mtrrcap = eax;
	if ( !(mtrrcap & 0x00000400) ) /* write-combining */
		return "write combining doesn't seem to be supported";
	int var_mtrrs = (mtrrcap & 0xFF);
	// Determine how many physical address bits the CPU implements so the
	// reserved bits of the base/mask MSRs are left untouched.
	uint32_t maxphyaddr;
	cpuid(0x80000000, eax, ebx, ecx, edx);
	uint32_t max_extended_cpuid = eax;
	if ( max_extended_cpuid >= 0x80000008 )
	{
		cpuid(0x80000008, eax, ebx, ecx, edx);
		maxphyaddr = (eax & 0xFF);
	}
	else
		maxphyaddr = 36;
	uint32_t bits_lo = 0xFFFFF000; /* assume maxphyaddr >= 36 */
	uint32_t bits_hi = ((uint32_t) 1 << (maxphyaddr - 32)) - 1;
	uint64_t bits = bits_lo | ((uint64_t) bits_hi << 32);
	// Check whether an MTRR already covers this region. If not, take an unused
	// one if possible.
	int first_unused = -1;
	uint32_t base_lo, base_hi, mask_lo, mask_hi;
	for ( int i = 0; i < var_mtrrs; i++ )
	{
		rdmsr(mtrr_mask(i), eax, edx);
		mask_lo = eax;
		mask_hi = edx;
		if ( mask_lo & 0x800 ) /* valid */
		{
			rdmsr(mtrr_base(i), eax, edx);
			base_lo = eax;
			base_hi = edx;
			uint64_t real_base = ((uint64_t) (base_hi & bits_hi) << 32) |
			                     (base_lo & bits_lo);
			uint64_t real_mask = ((uint64_t) (mask_hi & bits_hi) << 32) |
			                     (mask_lo & bits_lo);
			// Overlap test: the existing range starts before ours ends
			// and ends at or after ours starts.
			if ( real_base < (fb_base + fb_size) &&
			     real_base + (~real_mask & bits) >= fb_base )
				return "region already covered by another mtrr";
		}
		else if ( first_unused < 0 )
			first_unused = i;
	}
	if ( first_unused < 0 )
		return "all MTRRs in use";
	// Set up the first unused MTRR we found, preserving any reserved bits
	// already present in its base/mask MSRs.
	rdmsr(mtrr_base(first_unused), eax, edx);
	base_lo = eax;
	base_hi = edx;
	rdmsr(mtrr_mask(first_unused), eax, edx);
	mask_lo = eax;
	mask_hi = edx;
	base_lo = (base_lo & ~bits_lo & ~0xFF) |
	          (fb_base & bits_lo) | 0x01 /* WC */;
	base_hi = (base_hi & ~bits_hi) |
	          ((fb_base >> 32) & bits_hi);
	wrmsr(mtrr_base(first_unused), base_lo, base_hi);
	mask_lo = (mask_lo & ~bits_lo) |
	          (fb_mask & bits_lo) | 0x800 /* valid */;
	mask_hi = (mask_hi & ~bits_hi) |
	          ((fb_mask >> 32) & bits_hi);
	wrmsr(mtrr_mask(first_unused), mask_lo, mask_hi);
	if ( ret )
		*ret = first_unused;
	return NULL;
}
} // namespace MSR
} // namespace Sortix

42
sortix/x86-family/msr.h Normal file
View file

@ -0,0 +1,42 @@
/*******************************************************************************
Copyright(C) Jonas 'Sortie' Termansen 2012.
This file is part of Sortix.
Sortix is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
Sortix is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along with
Sortix. If not, see <http://www.gnu.org/licenses/>.
x86-family/msr.h
Functions to manipulate Model Specific Registers.
*******************************************************************************/
#ifndef SORTIX_X86_FAMILY_MSR_H
#define SORTIX_X86_FAMILY_MSR_H
namespace Sortix {
namespace MSR {
// Returns whether the CPU supports the Page Attribute Table feature.
bool IsPATSupported();
// Programs the PAT MSR with the kernel's desired memory type indexing.
void InitializePAT();
// Returns whether the CPU supports Memory Type Range Registers.
bool IsMTRRSupported();
// Covers [base, base + size) with a write-combining variable-range MTRR.
// Returns NULL on success (storing the MTRR index in *ret when ret is
// non-NULL), otherwise a string describing the failure.
const char* SetupMTRRForWC(addr_t base, size_t size, int* ret = NULL);
// Sets or clears the valid bit of a variable-range MTRR's mask MSR.
void EnableMTRR(int mtrr);
void DisableMTRR(int mtrr);
// Copies a variable-range MTRR's base and mask MSRs to another MTRR.
void CopyMTRR(int dst, int src);
} // namespace MSR
} // namespace Sortix
#endif