From eec633d16fe2ce6e740c1848209ebb8b6d9b60bd Mon Sep 17 00:00:00 2001
From: Ian Moffett
Date: Fri, 12 Jan 2024 00:07:32 -0500
Subject: kernel/amd64: pmap: Add pmap_map() routine

Add pmap_map() routine for creating virtual memory mappings.

Signed-off-by: Ian Moffett
---
 sys/arch/amd64/amd64/pmap.c | 107 +++++++++++++++++++++++++++++++++++++++++++-
 sys/include/vm/pmap.h       |   9 ++++
 2 files changed, 115 insertions(+), 1 deletion(-)

diff --git a/sys/arch/amd64/amd64/pmap.c b/sys/arch/amd64/amd64/pmap.c
index 1da1f99..3cae745 100644
--- a/sys/arch/amd64/amd64/pmap.c
+++ b/sys/arch/amd64/amd64/pmap.c
@@ -28,10 +28,115 @@
  */
 
 #include
+#include
 #include
+#include
+#include
 
-#define PTE_ADDR_MASK 0x000FFFFFFFFFF000
+#define PTE_ADDR_MASK   0x000FFFFFFFFFF000
+#define PTE_P           __BIT(0)        /* Present */
+#define PTE_RW          __BIT(1)        /* Writable */
+#define PTE_US          __BIT(2)        /* User r/w allowed */
+#define PTE_PWT         __BIT(3)        /* Page-level write-through */
+#define PTE_PCD         __BIT(4)        /* Page-level cache disable */
+#define PTE_ACC         __BIT(5)        /* Accessed */
+#define PTE_DIRTY       __BIT(6)        /* Dirty (written-to page) */
+#define PTE_PAT         __BIT(7)
+#define PTE_GLOBAL      __BIT(8)
+#define PTE_NX          __BIT(63)       /* Execute-disable */
+
+/*
+ * Convert pmap prot flags to PTE flags.
+ */
+static uint64_t
+pmap_prot_to_pte(vm_prot_t prot)
+{
+        uint64_t pte_flags = PTE_P | PTE_NX;
+
+        if (__TEST(prot, PMAP_WRITABLE))
+                pte_flags |= PTE_RW;
+        if (__TEST(prot, PMAP_EXEC))
+                pte_flags &= ~(PTE_NX);
+
+        return pte_flags;
+}
+
+/*
+ * Returns the index into a pagemap at a specific level
+ * for a given virtual address.
+ *
+ * @level: Requested level (4 = PML4 ... 1 = page table).
+ * @va: Virtual address.
+ */
+static size_t
+pmap_get_level_index(uint8_t level, vaddr_t va)
+{
+        /* TODO: Handle out-of-range levels more gracefully */
+        __assert(level <= 4 && level != 0);
+
+        switch (level) {
+        case 4:
+                return (va >> 39) & 0x1FF;
+        case 3:
+                return (va >> 30) & 0x1FF;
+        case 2:
+                return (va >> 21) & 0x1FF;
+        case 1:
+                return (va >> 12) & 0x1FF;
+        default:        /* Should not be reachable */
+                return 0;
+        }
+}
+
+/*
+ * Returns the next (lower) level pagemap referenced by the
+ * entry that @va selects within @pmap.
+ */
+static inline volatile uintptr_t *
+pmap_extract(uint8_t level, vaddr_t va, volatile uintptr_t *pmap)
+{
+        volatile uintptr_t *next;
+        size_t idx;
+
+        idx = pmap_get_level_index(level, va);
+        next = PHYS_TO_VIRT(pmap[idx] & PTE_ADDR_MASK);
+        return next;
+}
+
+/*
+ * TODO: Ensure operations here are serialized.
+ *
+ * TODO: Allocate intermediate pagemaps if they don't
+ *       exist, i.e., if they are NULL.
+ */
+int
+pmap_map(struct vm_ctx *ctx, vaddr_t va, paddr_t pa, vm_prot_t prot)
+{
+        struct vas vas = pmap_read_vas();
+        volatile uintptr_t *pml4 = PHYS_TO_VIRT(vas.top_level);
+        volatile uintptr_t *pdpt, *pd, *tbl;
+        uint64_t flags = pmap_prot_to_pte(prot);
+        int status = 0;
+
+        pdpt = pmap_extract(4, va, pml4);
+        if (pdpt == NULL) {
+                status = 1;
+                goto done;
+        }
+
+        pd = pmap_extract(3, va, pdpt);
+        if (pd == NULL) {
+                status = 1;
+                goto done;
+        }
+
+        tbl = pmap_extract(2, va, pd);
+        if (tbl == NULL) {
+                status = 1;
+                goto done;
+        }
+
+        /* Map our page */
+        tbl[pmap_get_level_index(1, va)] = pa | flags;
+        tlb_flush(va);
+done:
+        return status;
+}
 
 struct vas
 pmap_read_vas(void)
 {
diff --git a/sys/include/vm/pmap.h b/sys/include/vm/pmap.h
index 4d7a06f..e35d14a 100644
--- a/sys/include/vm/pmap.h
+++ b/sys/include/vm/pmap.h
@@ -49,6 +49,10 @@
 #include
 #include
 
+/* prot flags for mappings */
+#define PMAP_WRITABLE   __BIT(0)        /* Writable */
+#define PMAP_EXEC       __BIT(1)        /* Executable */
+
 /*
  * vm_ctx - Per core virtual memory context
  */
@@ -65,4 +69,9 @@ struct vm_ctx {
  */
 struct vas pmap_read_vas(void);
 
+/*
+ * Map a virtual address to a physical address.
+ */
+int pmap_map(struct vm_ctx *, vaddr_t, paddr_t, vm_prot_t);
+
 #endif  /* _VM_PMAP_H_ */
--
cgit v1.2.3
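
For illustration only (not part of the patch): a minimal caller-side sketch of the new interface. The include path, the example function name, and the way the vm_ctx pointer and addresses are obtained are assumptions; in this revision pmap_map() walks the VAS returned by pmap_read_vas() and fails only when an intermediate pagemap is missing (see the TODOs above).

#include <vm/pmap.h>    /* assumed include path, per sys/include/vm/pmap.h */

/*
 * Hypothetical usage: map one page-aligned physical frame at a
 * page-aligned virtual address, readable and writable but not
 * executable (PTE_NX stays set unless PMAP_EXEC is passed).
 */
static int
example_map_frame(struct vm_ctx *ctx, vaddr_t va, paddr_t pa)
{
        /* va and pa are placeholders and must be 4 KiB aligned. */
        if (pmap_map(ctx, va, pa, PMAP_WRITABLE) != 0) {
                /* An intermediate pagemap was missing. */
                return 1;
        }
        return 0;
}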