Diffstat (limited to 'sys/arch/amd64/cpu')
-rw-r--r--  sys/arch/amd64/cpu/boot.S     |  28
-rw-r--r--  sys/arch/amd64/cpu/cpu.c      |   6
-rw-r--r--  sys/arch/amd64/cpu/mmu.c      | 168
-rw-r--r--  sys/arch/amd64/cpu/mp.c       |  87
-rw-r--r--  sys/arch/amd64/cpu/spinlock.c |   6
5 files changed, 288 insertions(+), 7 deletions(-)
diff --git a/sys/arch/amd64/cpu/boot.S b/sys/arch/amd64/cpu/boot.S
index 0cda6a7..737092f 100644
--- a/sys/arch/amd64/cpu/boot.S
+++ b/sys/arch/amd64/cpu/boot.S
@@ -27,6 +27,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <md/msr.h>
+
.text
.globl _start
.extern uart_init
@@ -44,6 +46,7 @@ _start:
lea GDTR(%rip), %rdi /* Our GDTR */
call gdt_load /* Load our GDT */
call idt_load /* Load our IDT */
+ call cpu_loinit /* Initialize processor state */
/*
* RV7 will default to APIC operation, as per a section
@@ -62,6 +65,31 @@ _start:
hlt
jmp 1b
+ .globl cpu_loinit
+cpu_loinit:
+ /*
+ * Initialize low-level CPU state
+ */
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ push %rbx
+ push %rbp
+
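+ /*
+ * rdmsr/wrmsr select the MSR through ECX and move the 64-bit
+ * value through EDX:EAX; EDX is not touched between the two,
+ * so the high half of IA32_EFER is written back unchanged.
+ */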
+ mov $IA32_EFER, %ecx /* IA32_EFER */
+ rdmsr /* -> EAX */
+ or $1<<11, %eax /* EFER.NXE */
+ wrmsr /* Write it back */
+
+ pop %rbp
+ pop %rbx
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ retq
+
.section .rodata
bootmsg:
.ascii "[ preparing since 2025 ]\n"
diff --git a/sys/arch/amd64/cpu/cpu.c b/sys/arch/amd64/cpu/cpu.c
index 9047064..be2bf7d 100644
--- a/sys/arch/amd64/cpu/cpu.c
+++ b/sys/arch/amd64/cpu/cpu.c
@@ -16,15 +16,15 @@
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPKERNE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LKERNS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * PKERNSIBILITY OF SUCH DAMAGE.
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
diff --git a/sys/arch/amd64/cpu/mmu.c b/sys/arch/amd64/cpu/mmu.c
index 65c4e46..ec6d071 100644
--- a/sys/arch/amd64/cpu/mmu.c
+++ b/sys/arch/amd64/cpu/mmu.c
@@ -28,11 +28,15 @@
*/
#include <sys/types.h>
+#include <sys/errno.h>
#include <sys/cdefs.h>
#include <kern/panic.h>
#include <mu/mmu.h>
#include <vm/vm.h>
+#include <vm/phys.h>
#include <md/vas.h>
+#include <lib/stdbool.h>
+#include <lib/string.h>
/*
* See Intel SDM Vol 3A, Section 4.5, Table 4-19
@@ -49,6 +53,143 @@
#define PTE_GLOBAL BIT(8) /* Global / sticky map */
#define PTE_NX BIT(63) /* Execute-disable */
+typedef enum {
+ PMAP_PML1,
+ PMAP_PML2,
+ PMAP_PML3,
+ PMAP_PML4
+} pagelevel_t;
+
+/*
+ * Invalidate a single TLB entry
+ */
+static inline void
+pmap_invlpg(uintptr_t va)
+{
+ __asm(
+ "invlpg (%0)"
+ :
+ : "r" (pa)
+ : "memory"
+ );
+}
+
+/*
+ * Convert protection flags to page table flags
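+ * (e.g. PROT_WRITE | PROT_USER maps to PTE_P | PTE_RW | PTE_US | PTE_NX)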
+ */
+static size_t
+pmap_prot_conv(uint16_t prot)
+{
+ size_t pte = PTE_P | PTE_NX;
+
+ if (ISSET(prot, PROT_WRITE))
+ pte |= PTE_RW;
+ if (ISSET(prot, PROT_EXEC))
+ pte &= ~PTE_NX;
+ if (ISSET(prot, PROT_USER))
+ pte |= PTE_US;
+
+ return pte;
+}
+
+/*
+ * Get the index of a pagemap level using a linear
+ * address and the specified desired level
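+ * (PML4 selects VA bits 47:39, PML3 bits 38:30, PML2 bits 29:21,
+ * PML1 bits 20:12)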
+ */
+static inline size_t
+pmap_get_index(uintptr_t va, pagelevel_t level)
+{
+ switch (level) {
+ case PMAP_PML4:
+ return (va >> 39) & 0x1FF;
+ case PMAP_PML3:
+ return (va >> 30) & 0x1FF;
+ case PMAP_PML2:
+ return (va >> 21) & 0x1FF;
+ case PMAP_PML1:
+ return (va >> 12) & 0x1FF;
+ }
+
+ panic("vm: bad pagemap level in %s()\n", __func__);
+}
+
+/*
+ * Perform full or partial linear address translation by virtue
+ * of iterative descent.
+ *
+ * @vas: Virtual address space to target
+ * @va: Virtual address to translate
+ * @en_alloc: If true, allocate new levels if needed
+ * @lvl: Requested level
+ */
+static uintptr_t *
+pmap_get_level(struct mmu_vas *vas, uintptr_t va, bool en_alloc, pagelevel_t lvl)
+{
+ uintptr_t *pmap, phys;
+ uint8_t *tmp;
+ size_t index;
+ pagelevel_t curlvl;
+
+ if (vas == NULL) {
+ return NULL;
+ }
+
+ /* Start here */
+ phys = vas->cr3;
+ pmap = PHYS_TO_VIRT(phys);
+ curlvl = PMAP_PML4;
+
+ /* Start moving down */
+ while ((curlvl--) > lvl) {
+ index = pmap_get_index(va, curlvl);
+ if (ISSET(pmap[index], PTE_P)) {
+ return PHYS_TO_VIRT(pmap[index] & PTE_ADDR_MASK);
+ }
+
+ if (!en_alloc) {
+ return NULL;
+ }
+
+ /* Allocate a new level */
+ phys = vm_phys_alloc(1);
+ if (phys == 0) {
+ return NULL;
+ }
+
+ /* Ensure it is zeroed */
+ tmp = PHYS_TO_VIRT(phys);
+ memset(tmp, 0, 4096);
+
+ pmap[index] = phys | (PTE_P | PTE_RW | PTE_US);
+ pmap = (uintptr_t *)tmp;
+ }
+
+ return pmap;
+}
+
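+/*
+ * Map a single 4 KiB frame at @pa to the linear address @va in
+ * @vas with protection @prot. A sketch of a caller (the pagesize_t
+ * constant name PAGESIZE_4K is assumed here):
+ *
+ *	struct mmu_vas vas;
+ *
+ *	mu_pmap_readvas(&vas);
+ *	mu_pmap_map(&vas, pa, va, PROT_WRITE, PAGESIZE_4K);
+ */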
+int
+mu_pmap_map(struct mmu_vas *vas, uintptr_t pa, uintptr_t va,
+ uint16_t prot, pagesize_t ps)
+{
+ uintptr_t *pgtbl;
+ size_t index, pte_flags;
+
+ if (vas == NULL || ps > PMAP_PML4) {
+ return -EINVAL;
+ }
+
+ pgtbl = pmap_get_level(vas, va, true, PMAP_PML1);
+ if (pgtbl == NULL) {
+ return -ENOMEM;
+ }
+
+ index = pmap_get_index(va, PMAP_PML1);
+ pte_flags = pmap_prot_conv(prot);
+ pgtbl[index] = pa | pte_flags;
+ pmap_invlpg(va);
+ return 0;
+}
+
int
mu_pmap_readvas(struct mmu_vas *vas)
{
@@ -59,7 +200,6 @@ mu_pmap_readvas(struct mmu_vas *vas)
: "memory"
);
- vas->cr3 &= PTE_ADDR_MASK;
return 0;
}
@@ -76,6 +216,32 @@ mu_pmap_writevas(struct mmu_vas *vas)
return 0;
}
+int
+mu_pmap_forkvas(struct mmu_vas *result)
+{
+ struct mmu_vas vas;
+ uintptr_t paddr, *pml4_dest;
+ uintptr_t *pml4_src;
+
+ mu_pmap_readvas(&vas);
+ paddr = vm_phys_alloc(1);
+ if (paddr == 0) {
+ return -ENOMEM;
+ }
+
+ pml4_dest = PHYS_TO_VIRT(paddr);
+ pml4_src = PHYS_TO_VIRT(vas.cr3);
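+
+ /*
+ * Share the upper half of the PML4 (slots 256-511), which maps
+ * the kernel, with the current address space; the lower (user)
+ * half of the new PML4 starts out empty.
+ */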
+ for (uint16_t i = 0; i < 512; ++i) {
+ if (i < 256) {
+ pml4_dest[i] = 0;
+ } else {
+ pml4_dest[i] = pml4_src[i];
+ }
+ }
+
+ result->cr3 = paddr;
+ return 0;
+}
+
void
mu_pmap_init(void)
{
diff --git a/sys/arch/amd64/cpu/mp.c b/sys/arch/amd64/cpu/mp.c
index 866aff1..5ab7815 100644
--- a/sys/arch/amd64/cpu/mp.c
+++ b/sys/arch/amd64/cpu/mp.c
@@ -37,6 +37,8 @@
#include <dev/clkdev/hpet.h>
#include <lib/string.h>
#include <md/lapic.h>
+#include <md/msr.h>
+#include <md/cpu.h>
#include <mu/cpu.h>
#include <vm/vm.h>
#include <vm/phys.h>
@@ -88,10 +90,51 @@ struct ap_bootspace {
uintptr_t pml1;
};
+/*
+ * A temporary area to save the BSP's MTRRs so they
+ * can be loaded into the APs
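+ * (VCNT in IA32_MTRR_CAP is an 8-bit field, so 256 slots per
+ * array is always enough)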
+ */
+struct mtrr_save {
+ uintptr_t physbase[256];
+ uintptr_t physmask[256];
+} mtrr_save;
+
static struct ap_bootspace bs;
static volatile size_t ap_sync = 0;
__section(".trampoline") static char ap_code[4096];
+static void
+cpu_mtrr_save(void)
+{
+ uint64_t mtrr_cap;
+ uint8_t mtrr_count;
+
+ mtrr_cap = rdmsr(IA32_MTRR_CAP);
+ mtrr_count = mtrr_cap & 0xFF;
+
+ for (size_t i = 0; i < mtrr_count; ++i) {
+ mtrr_save.physbase[i] = rdmsr(IA32_MTRR_PHYSBASE + (2 * i));
+ mtrr_save.physmask[i] = rdmsr(IA32_MTRR_PHYSMASK + (2 * i));
+ }
+}
+
+static void
+cpu_mtrr_fetch(void)
+{
+ uint64_t mtrr_cap;
+ uint8_t mtrr_count;
+
+ mtrr_cap = rdmsr(IA32_MTRR_CAP);
+ mtrr_count = mtrr_cap & 0xFF;
+
+ for (size_t i = 0; i < mtrr_count; ++i) {
+ wrmsr(IA32_MTRR_PHYSBASE + (2 * i), mtrr_save.physbase[i]);
+ wrmsr(IA32_MTRR_PHYSMASK + (2 * i), mtrr_save.physmask[i]);
+ }
+}
+
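+/*
+ * cpu_mtrr_save() runs once on the BSP (from cpu_start_aps()) before
+ * the APs are brought up; each AP then restores the saved ranges via
+ * cpu_mtrr_fetch() in cpu_lm_entry() while caching is disabled.
+ */
+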
/*
* Initialize the boot address space
*/
@@ -177,6 +220,49 @@ cpu_free_bootspace(struct ap_bootspace *bs)
static void
cpu_lm_entry(void)
{
+ /*
+ * Put the processor in no cache fill mode so that we can safely
+ * update MTRRs without worrying about the ground moving under
+ * us...
+ */
+ __asmv(
+ "mov %%cr0, %%rax\n\t" /* CR0 -> RAX */
+ "or $0x40000000, %%rax\n\t" /* Set CR0.CD */
+ "mov $0xDFFFFFFF, %%rbx\n\t" /* ~(1 << 31) -> RBX */
+ "and %%rbx, %%rax\n\t" /* Unset CR0.NW */
+ "mov %%rax, %%cr0\n\t" /* Write it back */
+ :
+ :
+ : "rax", "rbx"
+ );
+
+ /* Flush all caches */
+ __asmv(
+ "wbinvd\n\t" /* Write-back and flush dcache */
+ "mov %%cr3, %%rax\n\t" /* CR3 -> RAX */
+ "mov %%rax, %%cr3" /* RAX -> CR3; flush TLB */
+ :
+ :
+ : "memory", "rax"
+ );
+
+ cpu_mtrr_fetch();
+
+ /*
+ * With the MTRRs given to us by the BSP now loaded, we can
+ * re-enable normal caching operation
+ */
+ __asmv(
+ "mov %%cr0, %%rax\n\t" /* CR0 -> RAX */
+ "mov $0xBFFFFFFF, %%rbx\n\t" /* ~(1 << 30) -> RBX */
+ "and %%rbx, %%rax\n\t" /* Unset CR0.CD */
+ "mov %%rax, %%cr0\n\t" /* Write it back */
+ :
+ :
+ : "rax", "rbx"
+ );
+
+ cpu_loinit();
for (;;) {
__asmv("cli; hlt");
}
@@ -271,6 +357,7 @@ cpu_start_aps(struct cpu_info *ci)
/* Initialize the bootspace */
cpu_init_bootspace(&bs);
+ cpu_mtrr_save();
/* Copy the bring up code to the BUA */
bua = AP_BUA_VADDR;
diff --git a/sys/arch/amd64/cpu/spinlock.c b/sys/arch/amd64/cpu/spinlock.c
index 8c037a9..1cfa088 100644
--- a/sys/arch/amd64/cpu/spinlock.c
+++ b/sys/arch/amd64/cpu/spinlock.c
@@ -16,15 +16,15 @@
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPKERNE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LKERNS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * PKERNSIBILITY OF SUCH DAMAGE.
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>