From a3a1cf9889bda1467f0326984af3560aa5c3ec01 Mon Sep 17 00:00:00 2001
From: Ian Moffett
Date: Sun, 1 Jun 2025 21:31:31 -0400
Subject: kernel/amd64: pmap: Add PTE dirty bit handling

Add support for managing the page table entry dirty bit. This will
allow vm(9) to know when to page out data to backing store, among
other things.

Signed-off-by: Ian Moffett
---
 sys/arch/amd64/amd64/pmap.c | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/sys/arch/amd64/amd64/pmap.c b/sys/arch/amd64/amd64/pmap.c
index 2e62a4b..0bdf3b7 100644
--- a/sys/arch/amd64/amd64/pmap.c
+++ b/sys/arch/amd64/amd64/pmap.c
@@ -33,6 +33,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -303,3 +305,37 @@ pmap_set_cache(struct vas vas, vaddr_t va, int type)
 
 	return 0;
 }
+
+bool
+pmap_is_clean(struct vas vas, vaddr_t va)
+{
+	uintptr_t *tbl;
+	int status;
+	size_t idx;
+
+	if ((status = pmap_get_tbl(vas, va, false, &tbl)) != 0)
+		return true;	/* no PTE to inspect; report clean */
+
+	idx = pmap_get_level_index(1, va);
+	return ISSET(tbl[idx], PTE_DIRTY) == 0;
+}
+
+void
+pmap_mark_clean(struct vas vas, vaddr_t va)
+{
+	uintptr_t *tbl;
+	int status;
+	size_t idx;
+
+	if ((status = pmap_get_tbl(vas, va, false, &tbl)) != 0)
+		return;
+
+	idx = pmap_get_level_index(1, va);
+	tbl[idx] &= ~PTE_DIRTY;	/* mark the page clean */
+
+	if (cpu_count() > 1) {
+		cpu_shootdown_tlb(va);	/* flush the stale PTE on all CPUs */
+	} else {
+		__invlpg(va);	/* flush the local TLB entry */
+	}
+}
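
As a usage sketch (not part of the commit): a pager built on top of this
interface could consult pmap_is_clean() to decide whether a page needs
writeback, and call pmap_mark_clean() once the data has reached backing
store. In the fragment below, vm_pageout_one() and vm_writeback() are
hypothetical names invented for illustration; only pmap_is_clean() and
pmap_mark_clean() come from the patch above.

	/*
	 * Illustrative only: vm_pageout_one() and vm_writeback() are
	 * assumed stand-ins; pmap_is_clean()/pmap_mark_clean() are the
	 * interfaces added by this patch.
	 */
	static int
	vm_pageout_one(struct vas vas, vaddr_t va)
	{
		/* The MMU never set PTE_DIRTY: backing store is current */
		if (pmap_is_clean(vas, va))
			return 0;

		/* Push the dirty page to backing store (assumed helper) */
		if (vm_writeback(vas, va) != 0)
			return -1;

		/*
		 * Clear PTE_DIRTY and flush the TLB so the CPU sets the
		 * bit again on the next write and a later pass sees it.
		 */
		pmap_mark_clean(vas, va);
		return 0;
	}

The TLB flush in pmap_mark_clean() matters for this pattern: x86 CPUs
only update the in-memory dirty bit when writing through a translation
whose cached dirty bit is clear, so without the invalidation a write
after cleaning could go unnoticed. On SMP the shootdown extends that
invalidation to every core.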