| field | value | date |
|---|---|---|
| author | Ian Moffett <ian@osmora.org> | 2025-11-18 11:15:07 -0500 |
| committer | Ian Moffett <ian@osmora.org> | 2025-11-18 11:15:07 -0500 |
| commit | 6576b39538f3c83c0d85ef7b4182add9a6cb77f7 | |
| tree | 62260b8500f47c5b5cbea7f1e93a8e6da68cab9f | |
| parent | bc1c4e11a560d46d17d4e2e7b1e94fa0d5e1daa6 | |
kern/amd64: mmu: Add function to map pages
Signed-off-by: Ian Moffett <ian@osmora.org>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | sys/arch/amd64/cpu/mmu.c | 139 |
| -rw-r--r-- | sys/inc/mu/mmu.h | 15 |
| -rw-r--r-- | sys/mu/mmu_stub.c | 12 |

3 files changed, 166 insertions, 0 deletions
diff --git a/sys/arch/amd64/cpu/mmu.c b/sys/arch/amd64/cpu/mmu.c
index 128cfcf..ec6d071 100644
--- a/sys/arch/amd64/cpu/mmu.c
+++ b/sys/arch/amd64/cpu/mmu.c
@@ -35,6 +35,8 @@
 #include <vm/vm.h>
 #include <vm/phys.h>
 #include <md/vas.h>
+#include <lib/stdbool.h>
+#include <lib/string.h>
 
 /*
  * See Intel SDM Vol 3A, Section 4.5, Table 4-19
@@ -51,6 +53,143 @@
 #define PTE_GLOBAL	BIT(8)		/* Global / sticky map */
 #define PTE_NX		BIT(63)		/* Execute-disable */
 
+typedef enum {
+	PMAP_PML1,
+	PMAP_PML2,
+	PMAP_PML3,
+	PMAP_PML4
+} pagelevel_t;
+
+/*
+ * Invalidate a single TLB entry
+ */
+static inline void
+pmap_invlpg(uintptr_t va)
+{
+	__asm(
+		"invlpg (%0)"
+		:
+		: "r" (va)
+		: "memory"
+	);
+}
+
+/*
+ * Convert protection flags to page table flags
+ */
+static size_t
+pmap_prot_conv(uint16_t prot)
+{
+	size_t pte = PTE_P | PTE_NX;
+
+	if (ISSET(prot, PROT_WRITE))
+		pte |= PTE_RW;
+	if (ISSET(prot, PROT_EXEC))
+		pte &= ~PTE_NX;
+	if (ISSET(prot, PROT_USER))
+		pte |= PTE_US;
+
+	return pte;
+}
+
+/*
+ * Get the index of a pagemap level using a linear
+ * address and the specified desired level
+ */
+static inline size_t
+pmap_get_index(uintptr_t va, pagelevel_t level)
+{
+	switch (level) {
+	case PMAP_PML4:
+		return (va >> 39) & 0x1FF;
+	case PMAP_PML3:
+		return (va >> 30) & 0x1FF;
+	case PMAP_PML2:
+		return (va >> 21) & 0x1FF;
+	case PMAP_PML1:
+		return (va >> 12) & 0x1FF;
+	}
+
+	panic("vm: bad level in %s()\n", __func__);
+}
+
+/*
+ * Perform full or partial linear address translation by virtue
+ * of iterative descent.
+ *
+ * @vas: Virtual address space to target
+ * @va: Virtual address to translate
+ * @en_alloc: If true, allocate new levels if needed
+ * @lvl: Requested level
+ */
+static uintptr_t *
+pmap_get_level(struct mmu_vas *vas, uintptr_t va, bool en_alloc, pagelevel_t lvl)
+{
+	uintptr_t *pmap, phys;
+	uint8_t *tmp;
+	size_t index;
+	pagelevel_t curlvl;
+
+	if (vas == NULL) {
+		return NULL;
+	}
+
+	/* Start here */
+	phys = vas->cr3;
+	pmap = PHYS_TO_VIRT(phys);
+	curlvl = PMAP_PML4;
+
+	/* Start moving down */
+	while (curlvl > lvl) {
+		/* Index the table for the current level, then step down one */
+		index = pmap_get_index(va, curlvl--);
+		if (ISSET(pmap[index], PTE_P)) {
+			pmap = PHYS_TO_VIRT(pmap[index] & PTE_ADDR_MASK);
+			continue;
+		}
+
+		if (!en_alloc) {
+			return NULL;
+		}
+
+		/* Allocate a new level */
+		phys = vm_phys_alloc(1);
+		if (phys == 0) {
+			return NULL;
+		}
+
+		/* Ensure it is zeroed */
+		tmp = PHYS_TO_VIRT(phys);
+		memset(tmp, 0, 4096);
+
+		pmap[index] = phys | (PTE_P | PTE_RW | PTE_US);
+		pmap = (uintptr_t *)tmp;
+	}
+
+	return pmap;
+}
+
+int
+mu_pmap_map(struct mmu_vas *vas, uintptr_t pa, uintptr_t va,
+    uint16_t prot, pagesize_t ps)
+{
+	uintptr_t *pgtbl;
+	size_t index, pte_flags;
+
+	if (vas == NULL || ps != PAGESIZE_4K) {
+		return -EINVAL;
+	}
+
+	pgtbl = pmap_get_level(vas, va, true, PMAP_PML1);
+	if (pgtbl == NULL) {
+		return -ENOMEM;
+	}
+
+	index = pmap_get_index(va, PMAP_PML1);
+	pte_flags = pmap_prot_conv(prot);
+	pgtbl[index] = pa | pte_flags;
+	pmap_invlpg(va);
+	return 0;
+}
+
 int
 mu_pmap_readvas(struct mmu_vas *vas)
 {
diff --git a/sys/inc/mu/mmu.h b/sys/inc/mu/mmu.h
index 945e375..da2a286 100644
--- a/sys/inc/mu/mmu.h
+++ b/sys/inc/mu/mmu.h
@@ -30,10 +30,25 @@
 #ifndef _MU_PMAP_H_
 #define _MU_PMAP_H_ 1
 
+#include <sys/mman.h>
 #include <sys/types.h>
 #include <sys/cdefs.h>
 #include <md/vas.h>	/* shared */
 
+typedef enum {
+	PAGESIZE_4K,
+} pagesize_t;
+
+/*
+ * Create a virtual to physical mapping of a single
+ * page of memory with a specific page size and specific
+ * protection flags.
+ */
+__strong int mu_pmap_map(
+	struct mmu_vas *vas, uintptr_t pa, uintptr_t va,
+	uint16_t prot, pagesize_t ps
+);
+
 /*
  * Copy the current VAS leaving the user-side
  * zeroed
diff --git a/sys/mu/mmu_stub.c b/sys/mu/mmu_stub.c
index b53c3a4..586edd4 100644
--- a/sys/mu/mmu_stub.c
+++ b/sys/mu/mmu_stub.c
@@ -31,6 +31,18 @@
 #include <mu/mmu.h>
 
 __weak int
+mu_pmap_map(struct mmu_vas *vas, uintptr_t pa, uintptr_t va, uint16_t prot,
+    pagesize_t ps)
+{
+	(void)vas;
+	(void)pa;
+	(void)va;
+	(void)prot;
+	(void)ps;
+	return 0;
+}
+
+__weak int
 mu_pmap_forkvas(struct mmu_vas *result)
 {
 	(void)result;
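
Not part of the commit, but a minimal caller sketch may help show how the new interface fits together once the strong amd64 implementation is linked in. It assumes mu_pmap_readvas() fills the caller's struct mmu_vas with the active address space and returns 0 on success, that vm_phys_alloc(1) hands back one 4 KiB frame (0 on failure), that the errno constants live in <sys/errno.h>, and that MAP_TEST_VA is a hypothetical free kernel virtual address invented purely for illustration.

/* Hypothetical example, not in the tree */
#include <sys/errno.h>
#include <vm/phys.h>
#include <mu/mmu.h>

/* Assumed free scratch kernel virtual address, illustration only */
#define MAP_TEST_VA 0xFFFFFF7F00000000UL

static int
map_one_test_page(void)
{
	struct mmu_vas vas;
	uintptr_t pa;
	int error;

	/* Assumed to read the currently active VAS into 'vas' */
	error = mu_pmap_readvas(&vas);
	if (error != 0) {
		return error;
	}

	/* One physical frame to back the new page */
	pa = vm_phys_alloc(1);
	if (pa == 0) {
		return -ENOMEM;
	}

	/* Kernel-only, read/write, non-executable 4 KiB mapping */
	return mu_pmap_map(&vas, pa, MAP_TEST_VA, PROT_WRITE, PAGESIZE_4K);
}

Because pmap_prot_conv() sets PTE_NX unless PROT_EXEC is passed and only sets PTE_US for PROT_USER, the mapping above ends up kernel-only, writable, and non-executable.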
