From 7dd5178ddd0078d2e1ef38722bff1172f79f0f05 Mon Sep 17 00:00:00 2001
From: Ian Moffett
Date: Mon, 17 Nov 2025 22:55:57 -0500
Subject: kern/amd64: mp: Sync MTRRs between APs and BSP

Section 11.11.8 of the Intel SDM states that all MTRRs in a
multiprocessing system must be synced to prevent undefined behavior.

Signed-off-by: Ian Moffett
---
 sys/arch/amd64/cpu/mp.c  | 85 ++++++++++++++++++++++++++++++++++++++++++++++++
 sys/inc/arch/amd64/msr.h |  4 +++
 2 files changed, 89 insertions(+)

(limited to 'sys')

diff --git a/sys/arch/amd64/cpu/mp.c b/sys/arch/amd64/cpu/mp.c
index 866aff1..09a4249 100644
--- a/sys/arch/amd64/cpu/mp.c
+++ b/sys/arch/amd64/cpu/mp.c
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -88,10 +89,51 @@ struct ap_bootspace {
     uintptr_t pml1;
 };

+/*
+ * A temporary area to save the BSP's MTRRs so they
+ * can be loaded into the APs
+ */
+struct mtrr_save {
+    uintptr_t physbase[256];
+    uintptr_t physmask[256];
+} mtrr_save;
+
 static struct ap_bootspace bs;
 static volatile size_t ap_sync = 0;
 __section(".trampoline") static char ap_code[4096];

+static void
+cpu_mtrr_save(void)
+{
+    uint64_t mtrr_cap;
+    uint64_t physbase, physmask;
+    uint8_t mtrr_count;
+
+    mtrr_cap = rdmsr(IA32_MTRR_CAP);
+    mtrr_count = mtrr_cap & 0xFF;
+
+    for (size_t i = 0; i < mtrr_count; ++i) {
+        mtrr_save.physbase[i] = rdmsr(IA32_MTRR_PHYSBASE + (2 * i));
+        mtrr_save.physmask[i] = rdmsr(IA32_MTRR_PHYSMASK + (2 * i));
+    }
+}
+
+static void
+cpu_mtrr_fetch(void)
+{
+    uint64_t mtrr_cap;
+    uint64_t physbase, physmask;
+    uint8_t mtrr_count;
+
+    mtrr_cap = rdmsr(IA32_MTRR_CAP);
+    mtrr_count = mtrr_cap & 0xFF;
+
+    for (size_t i = 0; i < mtrr_count; ++i) {
+        wrmsr(IA32_MTRR_PHYSBASE + (2 * i), mtrr_save.physbase[i]);
+        wrmsr(IA32_MTRR_PHYSMASK + (2 * i), mtrr_save.physmask[i]);
+    }
+}
+
 /*
  * Initialize the boot address space
  */
@@ -177,6 +219,48 @@ cpu_free_bootspace(struct ap_bootspace *bs)
 static void
 cpu_lm_entry(void)
 {
+    /*
+     * Put the processor in no cache fill mode so that we can safely
+     * update MTRRs without worrying about the ground moving under
+     * us...
+     */
+    __asmv(
+        "mov %%cr0, %%rax\n\t"          /* CR0 -> RAX */
+        "or $0x40000000, %%rax\n\t"     /* Set CR0.CD */
+        "mov $0xDFFFFFFF, %%rbx\n\t"    /* ~(1 << 29) -> RBX */
+        "and %%rbx, %%rax\n\t"          /* Unset CR0.NW */
+        "mov %%rax, %%cr0\n\t"          /* Write it back */
+        :
+        :
+        : "rax", "rbx"
+    );
+
+    /* Flush all caches */
+    __asmv(
+        "wbinvd\n\t"                    /* Write-back and flush dcache */
+        "mov %%cr3, %%rax\n\t"          /* CR3 -> RAX */
+        "mov %%rax, %%cr3"              /* RAX -> CR3; flush TLB */
+        :
+        :
+        : "memory", "rax"
+    );
+
+    cpu_mtrr_fetch();
+
+    /*
+     * Now we load all the MTRRs given to us by the BSP
+     * before we re-enable normal caching operation
+     */
+    __asmv(
+        "mov %%cr0, %%rax\n\t"          /* CR0 -> RAX */
+        "mov $0xBFFFFFFF, %%rbx\n\t"    /* ~(1 << 30) -> RBX */
+        "and %%rbx, %%rax\n\t"          /* Unset CR0.CD */
+        "mov %%rax, %%cr0\n\t"          /* Write it back */
+        :
+        :
+        : "rax", "rbx"
+    );
+
     for (;;) {
         __asmv("cli; hlt");
     }
@@ -271,6 +355,7 @@ cpu_start_aps(struct cpu_info *ci)

     /* Initialize the bootspace */
     cpu_init_bootspace(&bs);
+    cpu_mtrr_save();

     /* Copy the bring up code to the BUA */
     bua = AP_BUA_VADDR;
diff --git a/sys/inc/arch/amd64/msr.h b/sys/inc/arch/amd64/msr.h
index e3f8887..c194ec0 100644
--- a/sys/inc/arch/amd64/msr.h
+++ b/sys/inc/arch/amd64/msr.h
@@ -35,6 +35,10 @@
 #define IA32_APIC_BASE 0x0000001B
 #define IA32_GS_BASE 0xC0000101
+#define IA32_MTRR_CAP 0x000000FE
+#define IA32_DEF_TYPE 0x000002FF
+#define IA32_MTRR_PHYSBASE 0x00000200
+#define IA32_MTRR_PHYSMASK 0x00000201
 #define IA32_KERNEL_GS_BASE 0xC0000102

 __always_inline static inline uint64_t
--
cgit v1.2.3